"""Helpers to utilize existing stft / istft tests for testing `ShortTimeFFT`.
|
||||
|
||||
This module provides the functions stft_compare() and istft_compare(), which,
|
||||
compares the output between the existing (i)stft() and the shortTimeFFT based
|
||||
_(i)stft_wrapper() implementations in this module.
|
||||
|
||||
For testing add the following imports to the file ``tests/test_spectral.py``::
|
||||
|
||||
from ._scipy_spectral_test_shim import stft_compare as stft
|
||||
from ._scipy_spectral_test_shim import istft_compare as istft
|
||||
|
||||
and remove the existing imports of stft and istft.
|
||||
|
||||
The idea of these wrappers is not to provide a backward-compatible interface
|
||||
but to demonstrate that the ShortTimeFFT implementation is at least as capable
|
||||
as the existing one and delivers comparable results. Furthermore, the
|
||||
wrappers highlight the different philosophies of the implementations,
|
||||
especially in the border handling.
|
||||
"""
import platform
from typing import cast, Literal

import numpy as np
from numpy.testing import assert_allclose

from scipy.signal import ShortTimeFFT
from scipy.signal import csd, get_window, stft, istft
from scipy.signal._arraytools import const_ext, even_ext, odd_ext, zero_ext
from scipy.signal._short_time_fft import FFT_MODE_TYPE
from scipy.signal._spectral_py import _spectral_helper, _triage_segments, \
    _median_bias


def _stft_wrapper(x, fs=1.0, window='hann', nperseg=256, noverlap=None,
                  nfft=None, detrend=False, return_onesided=True,
                  boundary='zeros', padded=True, axis=-1, scaling='spectrum'):
    """Wrapper for the SciPy `stft()` function based on `ShortTimeFFT` for
    unit testing.

    Handling the boundary and padding is where `ShortTimeFFT` and `stft()`
    differ in behavior. Parts of `_spectral_helper()` were copied to mimic
    the `stft()` behavior.

    This function is meant to be solely used by `stft_compare()`.
    """
    if scaling not in ('psd', 'spectrum'):  # same errors as in original stft:
        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")

    # The following lines are taken from the original _spectral_helper():
    boundary_funcs = {'even': even_ext,
                      'odd': odd_ext,
                      'constant': const_ext,
                      'zeros': zero_ext,
                      None: None}

    if boundary not in boundary_funcs:
        raise ValueError(f"Unknown boundary option '{boundary}', must be one" +
                         f" of: {list(boundary_funcs.keys())}")
    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[axis])

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap
    n = x.shape[axis]

    # Padding occurs after boundary extension, so that the extended signal ends
    # in zeros, instead of introducing an impulse at the end.
    # I.e. if x = [..., 3, 2]
    # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
    # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]

    if boundary is not None:
        ext_func = boundary_funcs[boundary]
        # Extend by nperseg//2 in front and back:
        x = ext_func(x, nperseg//2, axis=axis)

    if padded:
        # Pad to integer number of windowed segments
        # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
        x = np.moveaxis(x, axis, -1)

        # This is an edge case where ShortTimeFFT returns one more time slice
        # than the SciPy stft(); shorten to remove the last time slice:
        if n % 2 == 1 and nperseg % 2 == 1 and noverlap % 2 == 1:
            x = x[..., :axis - 1]

        nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
        zeros_shape = list(x.shape[:-1]) + [nadd]
        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
        x = np.moveaxis(x, -1, axis)

    # ... end of code taken from the original _spectral_helper().
    scale_to = {'spectrum': 'magnitude', 'psd': 'psd'}[scaling]

    if np.iscomplexobj(x) and return_onesided:
        return_onesided = False
    # using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided')

    ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                      scale_to=scale_to, phase_shift=None)

    k_off = nperseg // 2
    p0 = 0  # ST.lower_border_end[1] + 1
    nn = x.shape[axis] if padded else n+k_off+1
    p1 = ST.upper_border_begin(nn)[1]  # ST.p_max(n) + 1

    # This is a bad hack to pass the test test_roundtrip_boundary_extension():
    if padded is True and nperseg - noverlap == 1:
        p1 -= nperseg // 2 - 1  # the reasoning behind this is not clear to me

    detr = None if detrend is False else detrend
    Sxx = ST.stft_detrend(x, detr, p0, p1, k_offset=k_off, axis=axis)
    t = ST.t(nn, 0, p1 - p0, k_offset=0 if boundary is not None else k_off)
    if x.dtype in (np.float32, np.complex64):
        Sxx = Sxx.astype(np.complex64)

    # workaround for test_average_all_segments() - seems to be buggy behavior:
    if boundary is None and padded is False:
        t, Sxx = t[1:-1], Sxx[..., :-2]
        t -= k_off / fs

    return ST.f, t, Sxx


def _istft_wrapper(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None,
                   nfft=None, input_onesided=True, boundary=True, time_axis=-1,
                   freq_axis=-2, scaling='spectrum') -> \
        tuple[np.ndarray, np.ndarray, tuple[int, int]]:
    """Wrapper for the SciPy `istft()` function based on `ShortTimeFFT` for
    unit testing.

    Note that the option handling is only implemented as far as needed by the
    unit tests. E.g., the case ``nperseg=None`` is not handled.

    This function is meant to be solely used by `istft_compare()`.
    """
    # *** Lines are taken from _spectral_py.istft() ***:
    if Zxx.ndim < 2:
        raise ValueError('Input stft must be at least 2d!')

    if freq_axis == time_axis:
        raise ValueError('Must specify differing time and frequency axes!')

    nseg = Zxx.shape[time_axis]

    if input_onesided:
        # Assume even segment length
        n_default = 2*(Zxx.shape[freq_axis] - 1)
    else:
        n_default = Zxx.shape[freq_axis]

    # Check windowing parameters
    if nperseg is None:
        nperseg = n_default
    else:
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    if nfft is None:
        if input_onesided and (nperseg == n_default + 1):
            # Odd nperseg, no FFT padding
            nfft = nperseg
        else:
            nfft = n_default
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    # Get window as array
    if isinstance(window, str) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError(f'window must have length of {nperseg}')

    outputlength = nperseg + (nseg-1)*nstep
    # *** End of lines taken from _spectral_py.istft() ***

    # Using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE, 'onesided' if input_onesided else 'twosided')
    scale_to = cast(Literal['magnitude', 'psd'],
                    {'spectrum': 'magnitude', 'psd': 'psd'}[scaling])

    ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                      scale_to=scale_to, phase_shift=None)

    if boundary:
        j = nperseg if nperseg % 2 == 0 else nperseg - 1
        k0 = ST.k_min + nperseg // 2
        k1 = outputlength - j + k0
    else:
        raise NotImplementedError("boundary=False does not make sense with " +
                                  "ShortTimeFFT.istft()!")

    x = ST.istft(Zxx, k0=k0, k1=k1, f_axis=freq_axis, t_axis=time_axis)
    t = np.arange(k1 - k0) * ST.T
    k_hi = ST.upper_border_begin(k1 - k0)[0]
    return t, x, (ST.lower_border_end[0], k_hi)


def _csd_wrapper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                 nfft=None, detrend='constant', return_onesided=True,
                 scaling='density', axis=-1, average='mean'):
    """Wrapper for the `csd()` function based on `ShortTimeFFT` for
    unit testing.
    """
    freqs, _, Pxy = _csd_test_shim(x, y, fs, window, nperseg, noverlap, nfft,
                                   detrend, return_onesided, scaling, axis)

    # The following code is taken from csd():
    if len(Pxy.shape) >= 2 and Pxy.size > 0:
        if Pxy.shape[-1] > 1:
            if average == 'median':
                # np.median must be passed real arrays for the desired result
                bias = _median_bias(Pxy.shape[-1])
                if np.iscomplexobj(Pxy):
                    Pxy = (np.median(np.real(Pxy), axis=-1)
                           + 1j * np.median(np.imag(Pxy), axis=-1))
                else:
                    Pxy = np.median(Pxy, axis=-1)
                Pxy /= bias
            elif average == 'mean':
                Pxy = Pxy.mean(axis=-1)
            else:
                raise ValueError(f'average must be "median" or "mean", got {average}')
        else:
            Pxy = np.reshape(Pxy, Pxy.shape[:-1])

    return freqs, Pxy


def _csd_test_shim(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                   nfft=None, detrend='constant', return_onesided=True,
                   scaling='density', axis=-1):
    """Compare output of _spectral_helper() and ShortTimeFFT, more
    precisely of _spect_helper_csd(), for use in _csd_wrapper().

    The motivation of this function is to test if the ShortTimeFFT-based
    wrapper `_spect_helper_csd()` returns the same values as
    `_spectral_helper()`. This function should only be used by csd() in
    (unit) testing.
    """
    freqs, t, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
                                     detrend, return_onesided, scaling, axis,
                                     mode='psd')
    freqs1, Pxy1 = _spect_helper_csd(x, y, fs, window, nperseg, noverlap, nfft,
                                     detrend, return_onesided, scaling, axis)

    np.testing.assert_allclose(freqs1, freqs)
    amax_Pxy = max(np.abs(Pxy).max(), 1) if Pxy.size else 1
    atol = np.finfo(Pxy.dtype).resolution * amax_Pxy  # needed for large Pxy
    # for c_ in range(Pxy.shape[-1]):
    #     np.testing.assert_allclose(Pxy1[:, c_], Pxy[:, c_], atol=atol)
    np.testing.assert_allclose(Pxy1, Pxy, atol=atol)
    return freqs, t, Pxy


def _spect_helper_csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                      nfft=None, detrend='constant', return_onesided=True,
                      scaling='density', axis=-1):
    """Wrapper for replacing _spectral_helper() by using the ShortTimeFFT
    for use by csd().

    This function should only be used by _csd_test_shim() and is only useful
    for testing the ShortTimeFFT implementation.
    """

    # The following lines are taken from the original _spectral_helper():
    same_data = y is x
    axis = int(axis)

    # Ensure we have np.arrays, get outdtype
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        # outdtype = np.result_type(x, y, np.complex64)
    # else:
    #     outdtype = np.result_type(x, np.complex64)

    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
        except ValueError as e:
            raise ValueError('x and y cannot be broadcast together.') from e

    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
            emptyout = np.moveaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    n = x.shape[axis] if same_data else max(x.shape[axis], y.shape[axis])
    win, nperseg = _triage_segments(window, nperseg, input_length=n)

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg // 2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    if np.iscomplexobj(x) and return_onesided:
        return_onesided = False

    # using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided
                    else 'twosided')
    scale = {'spectrum': 'magnitude', 'density': 'psd'}[scaling]
    SFT = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                       scale_to=scale, phase_shift=None)

    # _spectral_helper() calculates X.conj()*Y instead of X*Y.conj():
    Pxy = SFT.spectrogram(y, x, detr=None if detrend is False else detrend,
                          p0=0, p1=(n-noverlap)//SFT.hop, k_offset=nperseg//2,
                          axis=axis).conj()
    # Note:
    # 'onesided2X' scaling of ShortTimeFFT conflicts with the
    # scaling='spectrum' parameter, since it doubles the squared magnitude,
    # which in the view of the ShortTimeFFT implementation does not make sense.
    # Hence, the doubling of the square is implemented here:
    if return_onesided:
        f_axis = Pxy.ndim - 1 + axis if axis < 0 else axis
        Pxy = np.moveaxis(Pxy, f_axis, -1)
        Pxy[..., 1:-1 if SFT.mfft % 2 == 0 else None] *= 2
        Pxy = np.moveaxis(Pxy, -1, f_axis)

    return SFT.f, Pxy


def stft_compare(x, fs=1.0, window='hann', nperseg=256, noverlap=None,
                 nfft=None, detrend=False, return_onesided=True,
                 boundary='zeros', padded=True, axis=-1, scaling='spectrum'):
    """Assert that the results from the existing `stft()` and `_stft_wrapper()`
    are close to each other.

    For comparing the STFT values, an absolute tolerance of the floating point
    resolution was added to circumvent problems with the following tests:
    * For float32 the tolerances are much higher in
      TestSTFT.test_roundtrip_float32().
    * TestSTFT.test_roundtrip_scaling() has a high relative deviation.
      Interestingly this did not appear in SciPy 1.9.1 but only in the current
      development version.
    """
    kw = dict(x=x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
              nfft=nfft, detrend=detrend, return_onesided=return_onesided,
              boundary=boundary, padded=padded, axis=axis, scaling=scaling)
    f, t, Zxx = stft(**kw)
    f_wrapper, t_wrapper, Zxx_wrapper = _stft_wrapper(**kw)

    e_msg_part = " of `stft_wrapper()` differ from `stft()`."
    assert_allclose(f_wrapper, f, err_msg=f"Frequencies {e_msg_part}")
    assert_allclose(t_wrapper, t, err_msg=f"Time slices {e_msg_part}")

    # Adapted tolerances to account for the deviations described above:
    atol = np.finfo(Zxx.dtype).resolution * 2
    assert_allclose(Zxx_wrapper, Zxx, atol=atol,
                    err_msg=f"STFT values {e_msg_part}")
    return f, t, Zxx


def istft_compare(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None,
                  nfft=None, input_onesided=True, boundary=True, time_axis=-1,
                  freq_axis=-2, scaling='spectrum'):
    """Assert that the results from the existing `istft()` and
    `_istft_wrapper()` are close to each other.

    Quirks:
    * If ``boundary=False`` the comparison is skipped, since it does not
      make sense with ShortTimeFFT.istft(). Only used in the test
      TestSTFT.test_roundtrip_boundary_extension().
    * If ShortTimeFFT.istft() decides the STFT is not invertible, the
      comparison is skipped, since istft() only emits a warning and does not
      return a correct result. Only used in
      TestSTFT.test_roundtrip_not_nola().
    * For comparing the signals an absolute tolerance of the floating point
      resolution was added to account for the low accuracy of float32 (occurs
      only in TestSTFT.test_roundtrip_float32()).
    """
    kw = dict(Zxx=Zxx, fs=fs, window=window, nperseg=nperseg,
              noverlap=noverlap, nfft=nfft, input_onesided=input_onesided,
              boundary=boundary, time_axis=time_axis, freq_axis=freq_axis,
              scaling=scaling)

    t, x = istft(**kw)
    if not boundary:  # skip test_roundtrip_boundary_extension():
        return t, x  # _istft_wrapper() does not implement this case
    try:  # if inversion fails, istft() only emits a warning:
        t_wrapper, x_wrapper, (k_lo, k_hi) = _istft_wrapper(**kw)
    except ValueError as v:  # Do nothing if inversion fails:
        if v.args[0] == "Short-time Fourier Transform not invertible!":
            return t, x
        raise v

    e_msg_part = " of `istft_wrapper()` differ from `istft()`"
    assert_allclose(t, t_wrapper, err_msg=f"Sample times {e_msg_part}")

    # Adapted tolerances to account for resolution loss:
    atol = np.finfo(x.dtype).resolution*2  # instead of default atol = 0
    rtol = 1e-7  # default for np.allclose()

    # Relax atol on 32-bit platforms a bit to pass CI tests.
    # - Not clear why there are discrepancies (in the FFT maybe?)
    # - Not sure what changed on 'i686' since earlier those tests passed
    if x.dtype == np.float32 and platform.machine() == 'i686':
        # float32 is only used by TestSTFT.test_roundtrip_float32(), so
        # we are using the tolerances from there to circumvent CI problems
        atol, rtol = 1e-4, 1e-5
    elif platform.machine() in ('aarch64', 'i386', 'i686'):
        atol = max(atol, 1e-12)  # 2e-15 seems too tight for 32-bit platforms

    assert_allclose(x_wrapper[k_lo:k_hi], x[k_lo:k_hi], atol=atol, rtol=rtol,
                    err_msg=f"Signal values {e_msg_part}")
    return t, x


def csd_compare(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                nfft=None, detrend='constant', return_onesided=True,
                scaling='density', axis=-1, average='mean'):
    """Assert that the results from the existing `csd()` and `_csd_wrapper()`
    are close to each other.
    """
    kw = dict(x=x, y=y, fs=fs, window=window, nperseg=nperseg,
              noverlap=noverlap, nfft=nfft, detrend=detrend,
              return_onesided=return_onesided, scaling=scaling, axis=axis,
              average=average)
    freqs0, Pxy0 = csd(**kw)
    freqs1, Pxy1 = _csd_wrapper(**kw)

    assert_allclose(freqs1, freqs0)
    assert_allclose(Pxy1, Pxy0)
    return freqs0, Pxy0
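

# --- Illustrative usage (not part of the original shim) ----------------------
# A minimal sketch of how the comparison helpers above could be exercised
# directly; the signal, sampling rate and STFT parameters below are arbitrary
# values chosen only for illustration.
if __name__ == '__main__':
    rng = np.random.default_rng(42)
    sig = rng.standard_normal(1024)
    # Each *_compare() call asserts that the ShortTimeFFT-based wrapper and
    # the legacy implementation agree, and returns the legacy result:
    f, t, Zxx = stft_compare(sig, fs=100., nperseg=128, noverlap=64)
    t_r, sig_r = istft_compare(Zxx, fs=100., nperseg=128, noverlap=64)
    freqs, Pxy = csd_compare(sig, sig, fs=100., nperseg=128)
    print(f"STFT shape: {Zxx.shape}, reconstructed length: {len(sig_r)}")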
venv/lib/python3.12/site-packages/scipy/signal/tests/mpsig.py (new file, 122 lines)
@@ -0,0 +1,122 @@
"""
Some signal functions implemented using mpmath.
"""

try:
    import mpmath
except ImportError:
    mpmath = None


def _prod(seq):
    """Returns the product of the elements in the sequence `seq`."""
    p = 1
    for elem in seq:
        p *= elem
    return p


def _relative_degree(z, p):
    """
    Return relative degree of transfer function from zeros and poles.

    This is simply len(p) - len(z), which must be nonnegative.
    A ValueError is raised if len(p) < len(z).
    """
    degree = len(p) - len(z)
    if degree < 0:
        raise ValueError("Improper transfer function. "
                         "Must have at least as many poles as zeros.")
    return degree


def _zpkbilinear(z, p, k, fs):
    """Bilinear transformation to convert a filter from analog to digital."""

    degree = _relative_degree(z, p)

    fs2 = 2*fs

    # Bilinear transform the poles and zeros
    z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
    p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]

    # Any zeros that were at infinity get moved to the Nyquist frequency
    z_z.extend([-1] * degree)

    # Compensate for gain change
    numer = _prod(fs2 - z1 for z1 in z)
    denom = _prod(fs2 - p1 for p1 in p)
    k_z = k * numer / denom

    return z_z, p_z, k_z.real


def _zpklp2lp(z, p, k, wo=1):
    """Transform a lowpass filter to a different cutoff frequency."""

    degree = _relative_degree(z, p)

    # Scale all points radially from origin to shift cutoff frequency
    z_lp = [wo * z1 for z1 in z]
    p_lp = [wo * p1 for p1 in p]

    # Each shifted pole decreases gain by wo, each shifted zero increases it.
    # Cancel out the net change to keep overall gain the same
    k_lp = k * wo**degree

    return z_lp, p_lp, k_lp


def _butter_analog_poles(n):
    """
    Poles of an analog Butterworth lowpass filter.

    This is the same calculation as scipy.signal.buttap(n) or
    scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
    and only the poles are returned.
    """
    poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)]
    return poles


def butter_lp(n, Wn):
    """
    Lowpass Butterworth digital filter design.

    This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
    but it uses mpmath, and the results are returned in lists instead of NumPy
    arrays.
    """
    zeros = []
    poles = _butter_analog_poles(n)
    k = 1
    fs = 2
    warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
    z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
    z, p, k = _zpkbilinear(z, p, k, fs=fs)
    return z, p, k


def zpkfreqz(z, p, k, worN=None):
    """
    Frequency response of a filter in zpk format, using mpmath.

    This is the same calculation as scipy.signal.freqz, but the input is in
    zpk format, the calculation is performed using mpmath, and the results are
    returned in lists instead of NumPy arrays.
    """
    if worN is None or isinstance(worN, int):
        N = worN or 512
        ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
    else:
        ws = worN

    h = []
    for wk in ws:
        zm1 = mpmath.exp(1j * wk)
        numer = _prod([zm1 - t for t in z])
        denom = _prod([zm1 - t for t in p])
        hk = k * numer / denom
        h.append(hk)
    return ws, h
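

# --- Illustrative usage (not part of the original module) --------------------
# A small sketch, assuming mpmath is installed, of how butter_lp() could be
# cross-checked against scipy.signal.butter(); the order and cutoff below are
# arbitrary values chosen only for illustration.
if __name__ == '__main__':
    from scipy.signal import butter

    z_mp, p_mp, k_mp = butter_lp(4, 0.25)
    z_sp, p_sp, k_sp = butter(4, 0.25, output='zpk')
    # Sort the poles consistently and compare the mpmath and SciPy results:
    p_mp_c = sorted((complex(p) for p in p_mp), key=lambda c: (c.real, c.imag))
    p_sp_c = sorted(p_sp, key=lambda c: (c.real, c.imag))
    print(max(abs(a - b) for a, b in zip(p_mp_c, p_sp_c)))  # should be tiny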
@@ -0,0 +1,111 @@
import numpy as np

from numpy.testing import assert_array_equal
from pytest import raises as assert_raises

from scipy.signal._arraytools import (axis_slice, axis_reverse,
                                      odd_ext, even_ext, const_ext, zero_ext)


class TestArrayTools:

    def test_axis_slice(self):
        a = np.arange(12).reshape(3, 4)

        s = axis_slice(a, start=0, stop=1, axis=0)
        assert_array_equal(s, a[0:1, :])

        s = axis_slice(a, start=-1, axis=0)
        assert_array_equal(s, a[-1:, :])

        s = axis_slice(a, start=0, stop=1, axis=1)
        assert_array_equal(s, a[:, 0:1])

        s = axis_slice(a, start=-1, axis=1)
        assert_array_equal(s, a[:, -1:])

        s = axis_slice(a, start=0, step=2, axis=0)
        assert_array_equal(s, a[::2, :])

        s = axis_slice(a, start=0, step=2, axis=1)
        assert_array_equal(s, a[:, ::2])

    def test_axis_reverse(self):
        a = np.arange(12).reshape(3, 4)

        r = axis_reverse(a, axis=0)
        assert_array_equal(r, a[::-1, :])

        r = axis_reverse(a, axis=1)
        assert_array_equal(r, a[:, ::-1])

    def test_odd_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        odd = odd_ext(a, 2, axis=1)
        expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
                             [11, 10, 9, 8, 7, 6, 5, 4, 3]])
        assert_array_equal(odd, expected)

        odd = odd_ext(a, 1, axis=0)
        expected = np.array([[-7, -4, -1, 2, 5],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [17, 14, 11, 8, 5]])
        assert_array_equal(odd, expected)

        assert_raises(ValueError, odd_ext, a, 2, axis=0)
        assert_raises(ValueError, odd_ext, a, 5, axis=1)

    def test_even_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        even = even_ext(a, 2, axis=1)
        expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],
                             [7, 8, 9, 8, 7, 6, 5, 6, 7]])
        assert_array_equal(even, expected)

        even = even_ext(a, 1, axis=0)
        expected = np.array([[9, 8, 7, 6, 5],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [1, 2, 3, 4, 5]])
        assert_array_equal(even, expected)

        assert_raises(ValueError, even_ext, a, 2, axis=0)
        assert_raises(ValueError, even_ext, a, 5, axis=1)

    def test_const_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        const = const_ext(a, 2, axis=1)
        expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],
                             [9, 9, 9, 8, 7, 6, 5, 5, 5]])
        assert_array_equal(const, expected)

        const = const_ext(a, 1, axis=0)
        expected = np.array([[1, 2, 3, 4, 5],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [9, 8, 7, 6, 5]])
        assert_array_equal(const, expected)

    def test_zero_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        zero = zero_ext(a, 2, axis=1)
        expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0],
                             [0, 0, 9, 8, 7, 6, 5, 0, 0]])
        assert_array_equal(zero, expected)

        zero = zero_ext(a, 1, axis=0)
        expected = np.array([[0, 0, 0, 0, 0],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [0, 0, 0, 0, 0]])
        assert_array_equal(zero, expected)
@@ -0,0 +1,186 @@
|
||||
# pylint: disable=missing-docstring
|
||||
import numpy as np
|
||||
from numpy import array
|
||||
from numpy.testing import (assert_allclose, assert_array_equal,
|
||||
assert_almost_equal)
|
||||
import pytest
|
||||
from pytest import raises
|
||||
|
||||
import scipy.signal._bsplines as bsp
|
||||
from scipy import signal
|
||||
|
||||
|
||||
class TestBSplines:
|
||||
"""Test behaviors of B-splines. Some of the values tested against were
|
||||
returned as of SciPy 1.1.0 and are included for regression testing
|
||||
purposes. Others (at integer points) are compared to theoretical
|
||||
expressions (cf. Unser, Aldroubi, Eden, IEEE TSP 1993, Table 1)."""
|
||||
|
||||
def test_spline_filter(self):
|
||||
np.random.seed(12457)
|
||||
# Test the type-error branch
|
||||
raises(TypeError, bsp.spline_filter, array([0]), 0)
|
||||
# Test the real branch
|
||||
np.random.seed(12457)
|
||||
data_array_real = np.random.rand(12, 12)
|
||||
# make the magnitude exceed 1, and make some negative
|
||||
data_array_real = 10*(1-2*data_array_real)
|
||||
result_array_real = array(
|
||||
[[-.463312621, 8.33391222, .697290949, 5.28390836,
|
||||
5.92066474, 6.59452137, 9.84406950, -8.78324188,
|
||||
7.20675750, -8.17222994, -4.38633345, 9.89917069],
|
||||
[2.67755154, 6.24192170, -3.15730578, 9.87658581,
|
||||
-9.96930425, 3.17194115, -4.50919947, 5.75423446,
|
||||
9.65979824, -8.29066885, .971416087, -2.38331897],
|
||||
[-7.08868346, 4.89887705, -1.37062289, 7.70705838,
|
||||
2.51526461, 3.65885497, 5.16786604, -8.77715342e-03,
|
||||
4.10533325, 9.04761993, -.577960351, 9.86382519],
|
||||
[-4.71444301, -1.68038985, 2.84695116, 1.14315938,
|
||||
-3.17127091, 1.91830461, 7.13779687, -5.35737482,
|
||||
-9.66586425, -9.87717456, 9.93160672, 4.71948144],
|
||||
[9.49551194, -1.92958436, 6.25427993, -9.05582911,
|
||||
3.97562282, 7.68232426, -1.04514824, -5.86021443,
|
||||
-8.43007451, 5.47528997, 2.06330736, -8.65968112],
|
||||
[-8.91720100, 8.87065356, 3.76879937, 2.56222894,
|
||||
-.828387146, 8.72288903, 6.42474741, -6.84576083,
|
||||
9.94724115, 6.90665380, -6.61084494, -9.44907391],
|
||||
[9.25196790, -.774032030, 7.05371046, -2.73505725,
|
||||
2.53953305, -1.82889155, 2.95454824, -1.66362046,
|
||||
5.72478916, -3.10287679, 1.54017123, -7.87759020],
|
||||
[-3.98464539, -2.44316992, -1.12708657, 1.01725672,
|
||||
-8.89294671, -5.42145629, -6.16370321, 2.91775492,
|
||||
9.64132208, .702499998, -2.02622392, 1.56308431],
|
||||
[-2.22050773, 7.89951554, 5.98970713, -7.35861835,
|
||||
5.45459283, -7.76427957, 3.67280490, -4.05521315,
|
||||
4.51967507, -3.22738749, -3.65080177, 3.05630155],
|
||||
[-6.21240584, -.296796126, -8.34800163, 9.21564563,
|
||||
-3.61958784, -4.77120006, -3.99454057, 1.05021988e-03,
|
||||
-6.95982829, 6.04380797, 8.43181250, -2.71653339],
|
||||
[1.19638037, 6.99718842e-02, 6.72020394, -2.13963198,
|
||||
3.75309875, -5.70076744, 5.92143551, -7.22150575,
|
||||
-3.77114594, -1.11903194, -5.39151466, 3.06620093],
|
||||
[9.86326886, 1.05134482, -7.75950607, -3.64429655,
|
||||
7.81848957, -9.02270373, 3.73399754, -4.71962549,
|
||||
-7.71144306, 3.78263161, 6.46034818, -4.43444731]])
|
||||
assert_allclose(bsp.spline_filter(data_array_real, 0),
|
||||
result_array_real)
|
||||
|
||||
def test_gauss_spline(self):
|
||||
np.random.seed(12459)
|
||||
assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342)
|
||||
assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217]))
|
||||
|
||||
def test_gauss_spline_list(self):
|
||||
# regression test for gh-12152 (accept array_like)
|
||||
knots = [-1.0, 0.0, -1.0]
|
||||
assert_almost_equal(bsp.gauss_spline(knots, 3),
|
||||
array([0.15418033, 0.6909883, 0.15418033]))
|
||||
|
||||
def test_cspline1d(self):
|
||||
np.random.seed(12462)
|
||||
assert_array_equal(bsp.cspline1d(array([0])), [0.])
|
||||
c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378,
|
||||
4.78893826])
|
||||
# test lamda != 0
|
||||
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d)
|
||||
c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812,
|
||||
5.21051638])
|
||||
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0)
|
||||
|
||||
def test_qspline1d(self):
|
||||
np.random.seed(12463)
|
||||
assert_array_equal(bsp.qspline1d(array([0])), [0.])
|
||||
# test lamda != 0
|
||||
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.)
|
||||
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.)
|
||||
q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055,
|
||||
5.14634135])
|
||||
assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0)
|
||||
|
||||
def test_cspline1d_eval(self):
|
||||
np.random.seed(12464)
|
||||
assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.]))
|
||||
assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []),
|
||||
array([]))
|
||||
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
|
||||
dx = x[1]-x[0]
|
||||
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
|
||||
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
|
||||
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
|
||||
12.5]
|
||||
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
|
||||
1.396, 4.094])
|
||||
cj = bsp.cspline1d(y)
|
||||
newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
|
||||
4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
|
||||
4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
|
||||
7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
|
||||
2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
|
||||
7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
|
||||
6.80717667, 6.203, 4.41570658])
|
||||
assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
|
||||
|
||||
def test_qspline1d_eval(self):
|
||||
np.random.seed(12465)
|
||||
assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.]))
|
||||
assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []),
|
||||
array([]))
|
||||
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
|
||||
dx = x[1]-x[0]
|
||||
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
|
||||
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
|
||||
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
|
||||
12.5]
|
||||
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
|
||||
1.396, 4.094])
|
||||
cj = bsp.qspline1d(y)
|
||||
newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915,
|
||||
4.21600002, 5.91436915, 6.864, 5.18390821, 3.514,
|
||||
4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433,
|
||||
7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396,
|
||||
2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879,
|
||||
7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759,
|
||||
6.71900226, 6.203, 4.49418159])
|
||||
assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
|
||||
|
||||
|
||||
def test_sepfir2d_invalid_filter():
|
||||
filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0])
|
||||
image = np.random.rand(7, 9)
|
||||
# No error for odd lengths
|
||||
signal.sepfir2d(image, filt, filt[2:])
|
||||
|
||||
# Row or column filter must be odd
|
||||
with pytest.raises(ValueError, match="odd length"):
|
||||
signal.sepfir2d(image, filt, filt[1:])
|
||||
with pytest.raises(ValueError, match="odd length"):
|
||||
signal.sepfir2d(image, filt[1:], filt)
|
||||
|
||||
# Filters must be 1-dimensional
|
||||
with pytest.raises(ValueError, match="object too deep"):
|
||||
signal.sepfir2d(image, filt.reshape(1, -1), filt)
|
||||
with pytest.raises(ValueError, match="object too deep"):
|
||||
signal.sepfir2d(image, filt, filt.reshape(1, -1))
|
||||
|
||||
def test_sepfir2d_invalid_image():
|
||||
filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0])
|
||||
image = np.random.rand(8, 8)
|
||||
|
||||
# Image must be 2 dimensional
|
||||
with pytest.raises(ValueError, match="object too deep"):
|
||||
signal.sepfir2d(image.reshape(4, 4, 4), filt, filt)
|
||||
|
||||
with pytest.raises(ValueError, match="object of too small depth"):
|
||||
signal.sepfir2d(image[0], filt, filt)
|
||||
|
||||
|
||||
def test_cspline2d():
|
||||
np.random.seed(181819142)
|
||||
image = np.random.rand(71, 73)
|
||||
signal.cspline2d(image, 8.0)
|
||||
|
||||
|
||||
def test_qspline2d():
|
||||
np.random.seed(181819143)
|
||||
image = np.random.rand(71, 73)
|
||||
signal.qspline2d(image)
|
||||
@@ -0,0 +1,416 @@
|
||||
import numpy as np
|
||||
from numpy.testing import \
|
||||
assert_array_almost_equal, assert_almost_equal, \
|
||||
assert_allclose, assert_equal
|
||||
|
||||
import pytest
|
||||
from scipy.signal import cont2discrete as c2d
|
||||
from scipy.signal import dlsim, ss2tf, ss2zpk, lsim, lti
|
||||
from scipy.signal import tf2ss, impulse, dimpulse, step, dstep
|
||||
|
||||
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
|
||||
# March 29, 2011
|
||||
|
||||
|
||||
class TestC2D:
|
||||
def test_zoh(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.324360635350064)
|
||||
# c and d in discrete should be equal to their continuous counterparts
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cc, cd)
|
||||
assert_array_almost_equal(dc, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_foh(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
# True values are verified with Matlab
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.420839287058789)
|
||||
cd_truth = cc
|
||||
dd_truth = np.array([[0.260262223725224],
|
||||
[0.297442541400256],
|
||||
[-0.144098411624840]])
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_impulse(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [0.0]])
|
||||
|
||||
# True values are verified with Matlab
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.412180317675032)
|
||||
cd_truth = cc
|
||||
dd_truth = np.array([[0.4375], [0.5], [0.3125]])
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='impulse')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_gbt(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
alpha = 1.0 / 3.0
|
||||
|
||||
ad_truth = 1.6 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.3)
|
||||
cd_truth = np.array([[0.9, 1.2],
|
||||
[1.2, 1.2],
|
||||
[1.2, 0.3]])
|
||||
dd_truth = np.array([[0.175],
|
||||
[0.2],
|
||||
[-0.205]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='gbt', alpha=alpha)
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
|
||||
def test_euler(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = 1.5 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.25)
|
||||
cd_truth = np.array([[0.75, 1.0],
|
||||
[1.0, 1.0],
|
||||
[1.0, 0.25]])
|
||||
dd_truth = dc
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='euler')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_backward_diff(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = 2.0 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.5)
|
||||
cd_truth = np.array([[1.5, 2.0],
|
||||
[2.0, 2.0],
|
||||
[2.0, 0.5]])
|
||||
dd_truth = np.array([[0.875],
|
||||
[1.0],
|
||||
[0.295]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='backward_diff')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
|
||||
def test_bilinear(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = (5.0 / 3.0) * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 1.0 / 3.0)
|
||||
cd_truth = np.array([[1.0, 4.0 / 3.0],
|
||||
[4.0 / 3.0, 4.0 / 3.0],
|
||||
[4.0 / 3.0, 1.0 / 3.0]])
|
||||
dd_truth = np.array([[0.291666666666667],
|
||||
[1.0 / 3.0],
|
||||
[-0.121666666666667]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='bilinear')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
# Same continuous system again, but change sampling rate
|
||||
|
||||
ad_truth = 1.4 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.2)
|
||||
cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
|
||||
dd_truth = np.array([[0.175], [0.2], [-0.205]])
|
||||
|
||||
dt_requested = 1.0 / 3.0
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='bilinear')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_transferfunction(self):
|
||||
numc = np.array([0.25, 0.25, 0.5])
|
||||
denc = np.array([0.75, 0.75, 1.0])
|
||||
|
||||
numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
|
||||
dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
|
||||
|
||||
assert_array_almost_equal(numd, num)
|
||||
assert_array_almost_equal(dend, den)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_zerospolesgain(self):
|
||||
zeros_c = np.array([0.5, -0.5])
|
||||
poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
|
||||
k_c = 1.0
|
||||
|
||||
zeros_d = [1.23371727305860, 0.735356894461267]
|
||||
polls_d = [0.938148335039729 + 0.346233593780536j,
|
||||
0.938148335039729 - 0.346233593780536j]
|
||||
k_d = 1.0
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
|
||||
method='zoh')
|
||||
|
||||
assert_array_almost_equal(zeros_d, zeros)
|
||||
assert_array_almost_equal(polls_d, poles)
|
||||
assert_almost_equal(k_d, k)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_gbt_with_sio_tf_and_zpk(self):
|
||||
"""Test method='gbt' with alpha=0.25 for tf and zpk cases."""
|
||||
# State space coefficients for the continuous SIO system.
|
||||
A = -1.0
|
||||
B = 1.0
|
||||
C = 1.0
|
||||
D = 0.5
|
||||
|
||||
# The continuous transfer function coefficients.
|
||||
cnum, cden = ss2tf(A, B, C, D)
|
||||
|
||||
# Continuous zpk representation
|
||||
cz, cp, ck = ss2zpk(A, B, C, D)
|
||||
|
||||
h = 1.0
|
||||
alpha = 0.25
|
||||
|
||||
# Explicit formulas, in the scalar case.
|
||||
Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
|
||||
Bd = h * B / (1 - alpha * h * A)
|
||||
Cd = C / (1 - alpha * h * A)
|
||||
Dd = D + alpha * C * Bd
|
||||
|
||||
# Convert the explicit solution to tf
|
||||
dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
|
||||
|
||||
# Compute the discrete tf using cont2discrete.
|
||||
c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
|
||||
|
||||
assert_allclose(dnum, c2dnum)
|
||||
assert_allclose(dden, c2dden)
|
||||
|
||||
# Convert explicit solution to zpk.
|
||||
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
|
||||
|
||||
# Compute the discrete zpk using cont2discrete.
|
||||
c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
|
||||
|
||||
assert_allclose(dz, c2dz)
|
||||
assert_allclose(dp, c2dp)
|
||||
assert_allclose(dk, c2dk)
|
||||
|
||||
def test_discrete_approx(self):
|
||||
"""
|
||||
Test that the solution to the discrete approximation of a continuous
|
||||
system actually approximates the solution to the continuous system.
|
||||
This is an indirect test of the correctness of the implementation
|
||||
of cont2discrete.
|
||||
"""
|
||||
|
||||
def u(t):
|
||||
return np.sin(2.5 * t)
|
||||
|
||||
a = np.array([[-0.01]])
|
||||
b = np.array([[1.0]])
|
||||
c = np.array([[1.0]])
|
||||
d = np.array([[0.2]])
|
||||
x0 = 1.0
|
||||
|
||||
t = np.linspace(0, 10.0, 101)
|
||||
dt = t[1] - t[0]
|
||||
u1 = u(t)
|
||||
|
||||
# Use lsim to compute the solution to the continuous system.
|
||||
t, yout, xout = lsim((a, b, c, d), T=t, U=u1, X0=x0)
|
||||
|
||||
# Convert the continuous system to a discrete approximation.
|
||||
dsys = c2d((a, b, c, d), dt, method='bilinear')
|
||||
|
||||
# Use dlsim with the pairwise averaged input to compute the output
|
||||
# of the discrete system.
|
||||
u2 = 0.5 * (u1[:-1] + u1[1:])
|
||||
t2 = t[:-1]
|
||||
td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
|
||||
|
||||
# ymid is the average of consecutive terms of the "exact" output
|
||||
# computed by lsim2. This is what the discrete approximation
|
||||
# actually approximates.
|
||||
ymid = 0.5 * (yout[:-1] + yout[1:])
|
||||
|
||||
assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
|
||||
|
||||
def test_simo_tf(self):
|
||||
# See gh-5753
|
||||
tf = ([[1, 0], [1, 1]], [1, 1])
|
||||
num, den, dt = c2d(tf, 0.01)
|
||||
|
||||
assert_equal(dt, 0.01) # sanity check
|
||||
assert_allclose(den, [1, -0.990404983], rtol=1e-3)
|
||||
assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
|
||||
|
||||
def test_multioutput(self):
|
||||
ts = 0.01 # time step
|
||||
|
||||
tf = ([[1, -3], [1, 5]], [1, 1])
|
||||
num, den, dt = c2d(tf, ts)
|
||||
|
||||
tf1 = (tf[0][0], tf[1])
|
||||
num1, den1, dt1 = c2d(tf1, ts)
|
||||
|
||||
tf2 = (tf[0][1], tf[1])
|
||||
num2, den2, dt2 = c2d(tf2, ts)
|
||||
|
||||
# Sanity checks
|
||||
assert_equal(dt, dt1)
|
||||
assert_equal(dt, dt2)
|
||||
|
||||
# Check that we get the same results
|
||||
assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
|
||||
|
||||
# Single input, so the denominator should
|
||||
# not be multidimensional like the numerator
|
||||
assert_allclose(den, den1, rtol=1e-13)
|
||||
assert_allclose(den, den2, rtol=1e-13)
|
||||
|
||||
class TestC2dLti:
|
||||
def test_c2d_ss(self):
|
||||
# StateSpace
|
||||
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
|
||||
B = np.array([[0], [1]])
|
||||
C = np.array([[1, 0]])
|
||||
D = 0
|
||||
|
||||
A_res = np.array([[0.985136404135682, 0.004876671474795],
|
||||
[0.009753342949590, 0.965629718236502]])
|
||||
B_res = np.array([[0.000122937599964], [0.049135527547844]])
|
||||
|
||||
sys_ssc = lti(A, B, C, D)
|
||||
sys_ssd = sys_ssc.to_discrete(0.05)
|
||||
|
||||
assert_allclose(sys_ssd.A, A_res)
|
||||
assert_allclose(sys_ssd.B, B_res)
|
||||
assert_allclose(sys_ssd.C, C)
|
||||
assert_allclose(sys_ssd.D, D)
|
||||
|
||||
def test_c2d_tf(self):
|
||||
|
||||
sys = lti([0.5, 0.3], [1.0, 0.4])
|
||||
sys = sys.to_discrete(0.005)
|
||||
|
||||
# Matlab results
|
||||
num_res = np.array([0.5, -0.485149004980066])
|
||||
den_res = np.array([1.0, -0.980198673306755])
|
||||
|
||||
# Somehow a lot of numerical errors
|
||||
assert_allclose(sys.den, den_res, atol=0.02)
|
||||
assert_allclose(sys.num, num_res, atol=0.02)
|
||||
|
||||
|
||||
class TestC2dInvariants:
|
||||
# Some test cases for checking the invariances.
|
||||
# Array of triplets: (system, sample time, number of samples)
|
||||
cases = [
|
||||
(tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10),
|
||||
(tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10),
|
||||
(tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10),
|
||||
]
|
||||
|
||||
# Check that systems discretized with the impulse-invariant
|
||||
# method really hold the invariant
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_impulse_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont = impulse(sys, T=time)
|
||||
_, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'),
|
||||
n=len(time))
|
||||
assert_allclose(sample_time * yout_cont.ravel(), yout_disc[0].ravel())
|
||||
|
||||
# Step invariant should hold for ZOH discretized systems
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_step_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont = step(sys, T=time)
|
||||
_, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time))
|
||||
assert_allclose(yout_cont.ravel(), yout_disc[0].ravel())
|
||||
|
||||
# Linear invariant should hold for FOH discretized systems
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_linear_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont, _ = lsim(sys, T=time, U=time)
|
||||
_, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time)
|
||||
assert_allclose(yout_cont.ravel(), yout_disc.ravel())
|
||||
venv/lib/python3.12/site-packages/scipy/signal/tests/test_czt.py (new file, 219 lines)
@@ -0,0 +1,219 @@
|
||||
# This program is public domain
|
||||
# Authors: Paul Kienzle, Nadav Horesh
|
||||
'''
|
||||
A unit test module for czt.py
|
||||
'''
|
||||
import pytest
|
||||
from numpy.testing import assert_allclose
|
||||
from scipy.fft import fft
|
||||
from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT)
|
||||
import numpy as np
|
||||
|
||||
|
||||
def check_czt(x):
|
||||
# Check that czt is the equivalent of normal fft
|
||||
y = fft(x)
|
||||
y1 = czt(x)
|
||||
assert_allclose(y1, y, rtol=1e-13)
|
||||
|
||||
# Check that interpolated czt is the equivalent of normal fft
|
||||
y = fft(x, 100*len(x))
|
||||
y1 = czt(x, 100*len(x))
|
||||
assert_allclose(y1, y, rtol=1e-12)
|
||||
|
||||
|
||||
def check_zoom_fft(x):
|
||||
# Check that zoom_fft is the equivalent of normal fft
|
||||
y = fft(x)
|
||||
y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True)
|
||||
assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
|
||||
y1 = zoom_fft(x, [0, 2])
|
||||
assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
|
||||
|
||||
# Test fn scalar
|
||||
y1 = zoom_fft(x, 2-2./len(y), endpoint=True)
|
||||
assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
|
||||
y1 = zoom_fft(x, 2)
|
||||
assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
|
||||
|
||||
# Check that zoom_fft with oversampling is equivalent to zero padding
|
||||
over = 10
|
||||
yover = fft(x, over*len(x))
|
||||
y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True)
|
||||
assert_allclose(y2, yover, rtol=1e-12, atol=1e-10)
|
||||
y2 = zoom_fft(x, [0, 2], m=len(yover))
|
||||
assert_allclose(y2, yover, rtol=1e-12, atol=1e-10)
|
||||
|
||||
# Check that zoom_fft works on a subrange
|
||||
w = np.linspace(0, 2-2./len(x), len(x))
|
||||
f1, f2 = w[3], w[6]
|
||||
y3 = zoom_fft(x, [f1, f2], m=3*over+1, endpoint=True)
|
||||
idx3 = slice(3*over, 6*over+1)
|
||||
assert_allclose(y3, yover[idx3], rtol=1e-13)
|
||||
|
||||
|
||||
def test_1D():
|
||||
# Test of 1D version of the transforms
|
||||
|
||||
np.random.seed(0) # Deterministic randomness
|
||||
|
||||
# Random signals
|
||||
lengths = np.random.randint(8, 200, 20)
|
||||
lengths = np.append(lengths, 1)  # keep the returned array so length 1 is also tested
|
||||
for length in lengths:
|
||||
x = np.random.random(length)
|
||||
check_zoom_fft(x)
|
||||
check_czt(x)
|
||||
|
||||
# Gauss
|
||||
t = np.linspace(-2, 2, 128)
|
||||
x = np.exp(-t**2/0.01)
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Linear
|
||||
x = [1, 2, 3, 4, 5, 6, 7]
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Check near powers of two
|
||||
check_zoom_fft(range(126-31))
|
||||
check_zoom_fft(range(127-31))
|
||||
check_zoom_fft(range(128-31))
|
||||
check_zoom_fft(range(129-31))
|
||||
check_zoom_fft(range(130-31))
|
||||
|
||||
# Check transform on n-D array input
|
||||
x = np.reshape(np.arange(3*2*28), (3, 2, 28))
|
||||
y1 = zoom_fft(x, [0, 2-2./28])
|
||||
y2 = zoom_fft(x[2, 0, :], [0, 2-2./28])
|
||||
assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12)
|
||||
|
||||
y1 = zoom_fft(x, [0, 2], endpoint=False)
|
||||
y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False)
|
||||
assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12)
|
||||
|
||||
# Random (not a test condition)
|
||||
x = np.random.rand(101)
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Spikes
|
||||
t = np.linspace(0, 1, 128)
|
||||
x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13)
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Sines
|
||||
x = np.zeros(100, dtype=complex)
|
||||
x[[1, 5, 21]] = 1
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Sines plus complex component
|
||||
x += 1j*np.linspace(0, 0.5, x.shape[0])
|
||||
check_zoom_fft(x)
|
||||
|
||||
|
||||
def test_large_prime_lengths():
|
||||
np.random.seed(0) # Deterministic randomness
|
||||
for N in (101, 1009, 10007):
|
||||
x = np.random.rand(N)
|
||||
y = fft(x)
|
||||
y1 = czt(x)
|
||||
assert_allclose(y, y1, rtol=1e-12)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_czt_vs_fft():
|
||||
np.random.seed(123)
|
||||
random_lengths = np.random.exponential(100000, size=10).astype('int')
|
||||
for n in random_lengths:
|
||||
a = np.random.randn(n)
|
||||
assert_allclose(czt(a), fft(a), rtol=1e-11)
|
||||
|
||||
|
||||
def test_empty_input():
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
czt([])
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
zoom_fft([], 0.5)
|
||||
|
||||
|
||||
def test_0_rank_input():
|
||||
with pytest.raises(IndexError, match='tuple index out of range'):
|
||||
czt(5)
|
||||
with pytest.raises(IndexError, match='tuple index out of range'):
|
||||
zoom_fft(5, 0.5)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0],
|
||||
np.concatenate((np.array([0, 0, 1]),
|
||||
np.zeros(100)))))
|
||||
@pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021))
|
||||
@pytest.mark.parametrize('a', (1, 2, 0.5, 1.1))
|
||||
# Step that tests away from the unit circle, but not so far it explodes from
|
||||
# numerical error
|
||||
@pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j))
|
||||
def test_czt_math(impulse, m, w, a):
|
||||
# z-transform of an impulse is 1 everywhere
|
||||
assert_allclose(czt(impulse[2:], m=m, w=w, a=a),
|
||||
np.ones(m), rtol=1e-10)
|
||||
|
||||
# z-transform of a delayed impulse is z**-1
|
||||
assert_allclose(czt(impulse[1:], m=m, w=w, a=a),
|
||||
czt_points(m=m, w=w, a=a)**-1, rtol=1e-10)
|
||||
|
||||
# z-transform of a 2-delayed impulse is z**-2
|
||||
assert_allclose(czt(impulse, m=m, w=w, a=a),
|
||||
czt_points(m=m, w=w, a=a)**-2, rtol=1e-10)
|
||||
|
||||
|
||||
def test_int_args():
|
||||
# Integer argument `a` was producing all 0s
|
||||
assert_allclose(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15)
|
||||
assert_allclose(czt_points(11, w=2), 1/(2**np.arange(11)), rtol=1e-30)
|
||||
|
||||
|
||||
def test_czt_points():
|
||||
for N in (1, 2, 3, 8, 11, 100, 101, 10007):
|
||||
assert_allclose(czt_points(N), np.exp(2j*np.pi*np.arange(N)/N),
|
||||
rtol=1e-30)
|
||||
|
||||
assert_allclose(czt_points(7, w=1), np.ones(7), rtol=1e-30)
|
||||
assert_allclose(czt_points(11, w=2.), 1/(2**np.arange(11)), rtol=1e-30)
|
||||
|
||||
func = CZT(12, m=11, w=2., a=1)
|
||||
assert_allclose(func.points(), 1/(2**np.arange(11)), rtol=1e-30)
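
# Added illustrative sketch, not part of the original test suite (not collected by
# pytest): a CZT instance pre-computes the chirp for a fixed input length, so one
# object can be reused on many signals of that length and agrees with calling
# czt() each time.  The helper name is hypothetical.
def _demo_czt_instance_reuse():
    n, m, w, a = 12, 11, 2.0, 1.0
    transform = CZT(n, m=m, w=w, a=a)         # reusable callable for length-12 input
    rng = np.random.RandomState(1234)
    for _ in range(3):
        x = rng.randn(n)
        assert_allclose(transform(x), czt(x, m=m, w=w, a=a), rtol=1e-10)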
|
||||
|
||||
|
||||
@pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))])
|
||||
def test_CZT_size_mismatch(cls, args):
|
||||
# Data size doesn't match function's expected size
|
||||
myfunc = cls(*args)
|
||||
with pytest.raises(ValueError, match='CZT defined for'):
|
||||
myfunc(np.arange(5))
|
||||
|
||||
|
||||
def test_invalid_range():
|
||||
with pytest.raises(ValueError, match='2-length sequence'):
|
||||
ZoomFFT(100, [1, 2, 3])
|
||||
|
||||
|
||||
@pytest.mark.parametrize('m', [0, -11, 5.5, 4.0])
|
||||
def test_czt_points_errors(m):
|
||||
# Invalid number of points
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
czt_points(m)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('size', [0, -5, 3.5, 4.0])
|
||||
def test_nonsense_size(size):
|
||||
# Numpy and Scipy fft() give ValueError for 0 output size, so we do, too
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
CZT(size, 3)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
ZoomFFT(size, 0.2, 3)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
CZT(3, size)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
ZoomFFT(3, 0.2, size)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
czt([1, 2, 3], size)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
zoom_fft([1, 2, 3], 0.2, size)
|
||||
@@ -0,0 +1,598 @@
|
||||
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
|
||||
# April 4, 2011
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_equal,
|
||||
assert_array_almost_equal, assert_array_equal,
|
||||
assert_allclose, assert_, assert_almost_equal,
|
||||
suppress_warnings)
|
||||
from pytest import raises as assert_raises
|
||||
from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
|
||||
StateSpace, TransferFunction, ZerosPolesGain,
|
||||
dfreqresp, dbode, BadCoefficients)
|
||||
|
||||
|
||||
class TestDLTI:
|
||||
|
||||
def test_dlsim(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Create an input matrix with inputs down the columns (3 cols) and its
|
||||
# respective time input vector
|
||||
u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
|
||||
np.full((5, 1), 0.01),
|
||||
np.full((5, 1), -0.002)))
|
||||
t_in = np.linspace(0, 2.0, num=5)
|
||||
|
||||
# Define the known result
|
||||
yout_truth = np.array([[-0.001,
|
||||
-0.00073,
|
||||
0.039446,
|
||||
0.0915387,
|
||||
0.13195948]]).T
|
||||
xout_truth = np.asarray([[0, 0],
|
||||
[0.0012, 0.0005],
|
||||
[0.40233, 0.00071],
|
||||
[1.163368, -0.079327],
|
||||
[2.2402985, -0.3035679]])
|
||||
|
||||
tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
|
||||
|
||||
assert_array_almost_equal(yout_truth, yout)
|
||||
assert_array_almost_equal(xout_truth, xout)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Make sure input with single-dimension doesn't raise error
|
||||
dlsim((1, 2, 3), 4)
|
||||
|
||||
# Interpolated control - inputs should have different time steps
|
||||
# than the discrete model uses internally
|
||||
u_sparse = u[[0, 4], :]
|
||||
t_sparse = np.asarray([0.0, 2.0])
|
||||
|
||||
tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
|
||||
|
||||
assert_array_almost_equal(yout_truth, yout)
|
||||
assert_array_almost_equal(xout_truth, xout)
|
||||
assert_equal(len(tout), yout.shape[0])
|
||||
|
||||
# Transfer functions (assume dt = 0.5)
|
||||
num = np.asarray([1.0, -0.1])
|
||||
den = np.asarray([0.3, 1.0, 0.2])
|
||||
yout_truth = np.array([[0.0,
|
||||
0.0,
|
||||
3.33333333333333,
|
||||
-4.77777777777778,
|
||||
23.0370370370370]]).T
|
||||
|
||||
# Assume use of the first column of the control input built earlier
|
||||
tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Retest the same with a 1-D input vector
|
||||
uflat = np.asarray(u[:, 0])
|
||||
uflat = uflat.reshape((5,))
|
||||
tout, yout = dlsim((num, den, 0.5), uflat, t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# zeros-poles-gain representation
|
||||
zd = np.array([0.5, -0.5])
|
||||
pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
|
||||
k = 1.0
|
||||
yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T
|
||||
|
||||
tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dlsim, system, u)
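
    # Added illustrative sketch, not part of the original suite (the missing
    # ``test_`` prefix keeps pytest from collecting it): dlsim() iterates the
    # discrete state-space recurrence x[k+1] = A x[k] + B u[k], y[k] = C x[k] + D u[k],
    # so a hand-rolled loop over the same matrices reproduces its output.
    def _demo_dlsim_recurrence(self):
        a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
        b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
        c = np.asarray([[0.1, 0.3]])
        d = np.asarray([[0.0, -0.1, 0.0]])
        dt = 0.5
        u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
                       np.full((5, 1), 0.01),
                       np.full((5, 1), -0.002)))
        t_in = np.linspace(0, 2.0, num=5)

        x = np.zeros(a.shape[0])
        y_manual = np.empty((len(t_in), c.shape[0]))
        for k in range(len(t_in)):
            y_manual[k] = c @ x + d @ u[k]    # output before the state update
            x = a @ x + b @ u[k]              # advance the state one sample
        _, yout, _ = dlsim((a, b, c, d, dt), u, t_in)
        assert_array_almost_equal(yout, y_manual)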
|
||||
|
||||
def test_dstep(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Because b.shape[1] == 3, dstep should result in a tuple of three
|
||||
# result vectors
|
||||
yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
|
||||
-0.036324, -0.093318, -0.15782348,
|
||||
-0.226628324, -0.2969374948]),
|
||||
np.asarray([-0.1, -0.075, -0.058, -0.04815,
|
||||
-0.04453, -0.0461895, -0.0521812,
|
||||
-0.061588875, -0.073549579,
|
||||
-0.08727047595]),
|
||||
np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
|
||||
0.009081, 0.0233295, 0.03945587,
|
||||
0.056657081, 0.0742343737]))
|
||||
|
||||
tout, yout = dstep((a, b, c, d, dt), n=10)
|
||||
|
||||
assert_equal(len(yout), 3)
|
||||
|
||||
for i in range(0, len(yout)):
|
||||
assert_equal(yout[i].shape[0], 10)
|
||||
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
|
||||
|
||||
# Check that the other two inputs (tf, zpk) will work as well
|
||||
tfin = ([1.0], [1.0, 1.0], 0.5)
|
||||
yout_tfstep = np.asarray([0.0, 1.0, 0.0])
|
||||
tout, yout = dstep(tfin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
|
||||
|
||||
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
|
||||
tout, yout = dstep(zpkin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dstep, system)
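
    # Added illustrative sketch, not part of the original suite (not collected by
    # pytest): dstep() returns one response per input column, each equivalent to
    # dlsim() driven by a unit step applied to that input alone, which is how the
    # three-element tuple checked above can be reproduced directly.
    def _demo_dstep_via_dlsim(self):
        a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
        b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
        c = np.asarray([[0.1, 0.3]])
        d = np.asarray([[0.0, -0.1, 0.0]])
        dt = 0.5
        n = 10

        tout, yout = dstep((a, b, c, d, dt), n=n)
        for i in range(b.shape[1]):
            u = np.zeros((n, b.shape[1]))
            u[:, i] = 1.0                     # unit step on input i only
            _, y_i, _ = dlsim((a, b, c, d, dt), u)
            assert_array_almost_equal(yout[i], y_i)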
|
||||
|
||||
def test_dimpulse(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Because b.shape[1] == 3, dimpulse should result in a tuple of three
|
||||
# result vectors
|
||||
yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
|
||||
-0.045884, -0.056994, -0.06450548,
|
||||
-0.068804844, -0.0703091708]),
|
||||
np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
|
||||
-0.0016595, -0.0059917, -0.009407675,
|
||||
-0.011960704, -0.01372089695]),
|
||||
np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
|
||||
0.011471, 0.0142485, 0.01612637,
|
||||
0.017201211, 0.0175772927]))
|
||||
|
||||
tout, yout = dimpulse((a, b, c, d, dt), n=10)
|
||||
|
||||
assert_equal(len(yout), 3)
|
||||
|
||||
for i in range(0, len(yout)):
|
||||
assert_equal(yout[i].shape[0], 10)
|
||||
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
|
||||
|
||||
# Check that the other two inputs (tf, zpk) will work as well
|
||||
tfin = ([1.0], [1.0, 1.0], 0.5)
|
||||
yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
|
||||
tout, yout = dimpulse(tfin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
|
||||
|
||||
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
|
||||
tout, yout = dimpulse(zpkin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dimpulse, system)
|
||||
|
||||
def test_dlsim_trivial(self):
|
||||
a = np.array([[0.0]])
|
||||
b = np.array([[0.0]])
|
||||
c = np.array([[0.0]])
|
||||
d = np.array([[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u)
|
||||
assert_array_equal(tout, np.arange(float(n)))
|
||||
assert_array_equal(yout, np.zeros((n, 1)))
|
||||
assert_array_equal(xout, np.zeros((n, 1)))
|
||||
|
||||
def test_dlsim_simple1d(self):
|
||||
a = np.array([[0.5]])
|
||||
b = np.array([[0.0]])
|
||||
c = np.array([[1.0]])
|
||||
d = np.array([[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
|
||||
assert_array_equal(tout, np.arange(float(n)))
|
||||
expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
|
||||
assert_array_equal(yout, expected)
|
||||
assert_array_equal(xout, expected)
|
||||
|
||||
def test_dlsim_simple2d(self):
|
||||
lambda1 = 0.5
|
||||
lambda2 = 0.25
|
||||
a = np.array([[lambda1, 0.0],
|
||||
[0.0, lambda2]])
|
||||
b = np.array([[0.0],
|
||||
[0.0]])
|
||||
c = np.array([[1.0, 0.0],
|
||||
[0.0, 1.0]])
|
||||
d = np.array([[0.0],
|
||||
[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
|
||||
assert_array_equal(tout, np.arange(float(n)))
|
||||
# The analytical solution:
|
||||
expected = (np.array([lambda1, lambda2]) **
|
||||
np.arange(float(n)).reshape(-1, 1))
|
||||
assert_array_equal(yout, expected)
|
||||
assert_array_equal(xout, expected)
|
||||
|
||||
def test_more_step_and_impulse(self):
|
||||
lambda1 = 0.5
|
||||
lambda2 = 0.75
|
||||
a = np.array([[lambda1, 0.0],
|
||||
[0.0, lambda2]])
|
||||
b = np.array([[1.0, 0.0],
|
||||
[0.0, 1.0]])
|
||||
c = np.array([[1.0, 1.0]])
|
||||
d = np.array([[0.0, 0.0]])
|
||||
|
||||
n = 10
|
||||
|
||||
# Check a step response.
|
||||
ts, ys = dstep((a, b, c, d, 1), n=n)
|
||||
|
||||
# Create the exact step response.
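        # With x[0] = 0 and a constant unit input, each decoupled state obeys
        # x[k] = sum_{i=0}^{k-1} lambda**i, a geometric series, so the step
        # response is (1 - lambda**k) / (1 - lambda); since B = I and C = [1, 1],
        # each input drives exactly one state, giving the two responses below.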
|
||||
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
|
||||
stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
|
||||
|
||||
assert_allclose(ys[0][:, 0], stp0)
|
||||
assert_allclose(ys[1][:, 0], stp1)
|
||||
|
||||
# Check an impulse response with an initial condition.
|
||||
x0 = np.array([1.0, 1.0])
|
||||
ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
|
||||
|
||||
# Create the exact impulse response.
|
||||
imp = (np.array([lambda1, lambda2]) **
|
||||
np.arange(-1, n + 1).reshape(-1, 1))
|
||||
imp[0, :] = 0.0
|
||||
# Analytical solution to impulse response
|
||||
y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
|
||||
y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
|
||||
|
||||
assert_allclose(yi[0][:, 0], y0)
|
||||
assert_allclose(yi[1][:, 0], y1)
|
||||
|
||||
# Check that dt=0.1, n=3 gives 3 time values.
|
||||
system = ([1.0], [1.0, -0.5], 0.1)
|
||||
t, (y,) = dstep(system, n=3)
|
||||
assert_allclose(t, [0, 0.1, 0.2])
|
||||
assert_array_equal(y.T, [[0, 1.0, 1.5]])
|
||||
t, (y,) = dimpulse(system, n=3)
|
||||
assert_allclose(t, [0, 0.1, 0.2])
|
||||
assert_array_equal(y.T, [[0, 1, 0.5]])
|
||||
|
||||
|
||||
class TestDlti:
|
||||
def test_dlti_instantiation(self):
|
||||
        # Test that dlti can be instantiated.
|
||||
|
||||
dt = 0.05
|
||||
# TransferFunction
|
||||
s = dlti([1], [-1], dt=dt)
|
||||
assert_(isinstance(s, TransferFunction))
|
||||
assert_(isinstance(s, dlti))
|
||||
assert_(not isinstance(s, lti))
|
||||
assert_equal(s.dt, dt)
|
||||
|
||||
# ZerosPolesGain
|
||||
s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
|
||||
assert_(isinstance(s, ZerosPolesGain))
|
||||
assert_(isinstance(s, dlti))
|
||||
assert_(not isinstance(s, lti))
|
||||
assert_equal(s.dt, dt)
|
||||
|
||||
# StateSpace
|
||||
s = dlti([1], [-1], 1, 3, dt=dt)
|
||||
assert_(isinstance(s, StateSpace))
|
||||
assert_(isinstance(s, dlti))
|
||||
assert_(not isinstance(s, lti))
|
||||
assert_equal(s.dt, dt)
|
||||
|
||||
# Number of inputs
|
||||
assert_raises(ValueError, dlti, 1)
|
||||
assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
|
||||
|
||||
|
||||
class TestStateSpaceDisc:
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
StateSpace(1, 1, 1, 1, dt=dt)
|
||||
StateSpace([1], [2], [3], [4], dt=dt)
|
||||
StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
|
||||
np.array([[1, 0]]), np.array([[0]]), dt=dt)
|
||||
StateSpace(1, 1, 1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = StateSpace(1, 2, 3, 4, dt=0.05)
|
||||
assert_(isinstance(s.to_ss(), StateSpace))
|
||||
assert_(isinstance(s.to_tf(), TransferFunction))
|
||||
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
|
||||
|
||||
# Make sure copies work
|
||||
assert_(StateSpace(s) is not s)
|
||||
assert_(s.to_ss() is not s)
|
||||
|
||||
def test_properties(self):
|
||||
# Test setters/getters for cross class properties.
|
||||
# This implicitly tests to_tf() and to_zpk()
|
||||
|
||||
# Getters
|
||||
s = StateSpace(1, 1, 1, 1, dt=0.05)
|
||||
assert_equal(s.poles, [1])
|
||||
assert_equal(s.zeros, [0])
|
||||
|
||||
|
||||
class TestTransferFunction:
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
TransferFunction(1, 1, dt=dt)
|
||||
TransferFunction([1], [2], dt=dt)
|
||||
TransferFunction(np.array([1]), np.array([2]), dt=dt)
|
||||
TransferFunction(1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = TransferFunction([1, 0], [1, -1], dt=0.05)
|
||||
assert_(isinstance(s.to_ss(), StateSpace))
|
||||
assert_(isinstance(s.to_tf(), TransferFunction))
|
||||
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
|
||||
|
||||
# Make sure copies work
|
||||
assert_(TransferFunction(s) is not s)
|
||||
assert_(s.to_tf() is not s)
|
||||
|
||||
def test_properties(self):
|
||||
# Test setters/getters for cross class properties.
|
||||
# This implicitly tests to_ss() and to_zpk()
|
||||
|
||||
# Getters
|
||||
s = TransferFunction([1, 0], [1, -1], dt=0.05)
|
||||
assert_equal(s.poles, [1])
|
||||
assert_equal(s.zeros, [0])
|
||||
|
||||
|
||||
class TestZerosPolesGain:
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
ZerosPolesGain(1, 1, 1, dt=dt)
|
||||
ZerosPolesGain([1], [2], 1, dt=dt)
|
||||
ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
|
||||
ZerosPolesGain(1, 1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = ZerosPolesGain(1, 2, 3, dt=0.05)
|
||||
assert_(isinstance(s.to_ss(), StateSpace))
|
||||
assert_(isinstance(s.to_tf(), TransferFunction))
|
||||
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
|
||||
|
||||
# Make sure copies work
|
||||
assert_(ZerosPolesGain(s) is not s)
|
||||
assert_(s.to_zpk() is not s)
|
||||
|
||||
|
||||
class Test_dfreqresp:
|
||||
|
||||
def test_manual(self):
|
||||
# Test dfreqresp() real part calculation (manual sanity check).
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
w = [0.1, 1, 10]
|
||||
w, H = dfreqresp(system, w=w)
|
||||
|
||||
# test real
|
||||
expected_re = [1.2383, 0.4130, -0.7553]
|
||||
assert_almost_equal(H.real, expected_re, decimal=4)
|
||||
|
||||
# test imag
|
||||
expected_im = [-0.1555, -1.0214, 0.3955]
|
||||
assert_almost_equal(H.imag, expected_im, decimal=4)
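
    # Added illustrative sketch, not part of the original suite (not collected by
    # pytest): the hard-coded values above are H(z) = 1 / (z - 0.2) evaluated on
    # the unit circle at z = exp(1j*w), with w in radians/sample, which is what
    # dfreqresp() computes for a discrete-time transfer function.
    def _demo_manual_reference(self):
        system = TransferFunction(1, [1, -0.2], dt=0.1)
        w = np.array([0.1, 1, 10])
        _, H = dfreqresp(system, w=w)
        H_ref = 1.0 / (np.exp(1j * w) - 0.2)  # direct evaluation of H(e^{jw})
        assert_almost_equal(H, H_ref)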
|
||||
|
||||
def test_auto(self):
|
||||
# Test dfreqresp() real part calculation.
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
w = [0.1, 1, 10, 100]
|
||||
w, H = dfreqresp(system, w=w)
|
||||
jw = np.exp(w * 1j)
|
||||
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
|
||||
|
||||
# test real
|
||||
expected_re = y.real
|
||||
assert_almost_equal(H.real, expected_re)
|
||||
|
||||
# test imag
|
||||
expected_im = y.imag
|
||||
assert_almost_equal(H.imag, expected_im)
|
||||
|
||||
def test_freq_range(self):
|
||||
        # Test that dfreqresp() finds a reasonable frequency range.
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
        # Expected frequencies are n points spanning [0, pi).
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
n = 10
|
||||
expected_w = np.linspace(0, np.pi, 10, endpoint=False)
|
||||
w, H = dfreqresp(system, n=n)
|
||||
assert_almost_equal(w, expected_w)
|
||||
|
||||
def test_pole_one(self):
|
||||
        # Test that dfreqresp() doesn't fail on a system with a pole at z = 1.
|
||||
        # discrete-time integrator, pole at z = 1: H(z) = 1 / (z - 1)
|
||||
system = TransferFunction([1], [1, -1], dt=0.1)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, message="divide by zero")
|
||||
sup.filter(RuntimeWarning, message="invalid value encountered")
|
||||
w, H = dfreqresp(system, n=2)
|
||||
assert_equal(w[0], 0.) # a fail would give not-a-number
|
||||
|
||||
def test_error(self):
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dfreqresp, system)
|
||||
|
||||
def test_from_state_space(self):
|
||||
        # H(z) = 2 / (z**3 - 0.5 * z**2)
|
||||
|
||||
system_TF = dlti([2], [1, -0.5, 0, 0])
|
||||
|
||||
A = np.array([[0.5, 0, 0],
|
||||
[1, 0, 0],
|
||||
[0, 1, 0]])
|
||||
B = np.array([[1, 0, 0]]).T
|
||||
C = np.array([[0, 0, 2]])
|
||||
D = 0
|
||||
|
||||
system_SS = dlti(A, B, C, D)
|
||||
w = 10.0**np.arange(-3,0,.5)
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(BadCoefficients)
|
||||
w1, H1 = dfreqresp(system_TF, w=w)
|
||||
w2, H2 = dfreqresp(system_SS, w=w)
|
||||
|
||||
assert_almost_equal(H1, H2)
|
||||
|
||||
def test_from_zpk(self):
|
||||
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
system_ZPK = dlti([],[0.2],0.3)
|
||||
system_TF = dlti(0.3, [1, -0.2])
|
||||
w = [0.1, 1, 10, 100]
|
||||
w1, H1 = dfreqresp(system_ZPK, w=w)
|
||||
w2, H2 = dfreqresp(system_TF, w=w)
|
||||
assert_almost_equal(H1, H2)
|
||||
|
||||
|
||||
class Test_bode:
|
||||
|
||||
def test_manual(self):
|
||||
        # Test dbode() magnitude calculation (manual sanity check).
|
||||
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
dt = 0.1
|
||||
system = TransferFunction(0.3, [1, -0.2], dt=dt)
|
||||
w = [0.1, 0.5, 1, np.pi]
|
||||
w2, mag, phase = dbode(system, w=w)
|
||||
|
||||
# Test mag
|
||||
expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
|
||||
assert_almost_equal(mag, expected_mag, decimal=4)
|
||||
|
||||
# Test phase
|
||||
expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
|
||||
assert_almost_equal(phase, expected_phase, decimal=4)
|
||||
|
||||
# Test frequency
|
||||
assert_equal(np.array(w) / dt, w2)
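
    # Added illustrative sketch, not part of the original suite (not collected by
    # pytest): dbode() reports 20*log10(|H|) in dB and the phase in degrees, both
    # evaluated at z = exp(1j*w) with w in radians/sample, and rescales the
    # returned frequencies to radians/second by dividing by dt (checked above).
    def _demo_manual_reference(self):
        dt = 0.1
        system = TransferFunction(0.3, [1, -0.2], dt=dt)
        w = np.array([0.1, 0.5, 1, np.pi])
        w2, mag, phase = dbode(system, w=w)
        H = 0.3 / (np.exp(1j * w) - 0.2)      # direct evaluation of H(e^{jw})
        assert_almost_equal(mag, 20 * np.log10(np.abs(H)))
        assert_almost_equal(phase, np.rad2deg(np.angle(H)))
        assert_almost_equal(w2, w / dt)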
|
||||
|
||||
def test_auto(self):
|
||||
        # Test dbode() magnitude calculation.
|
||||
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
|
||||
w = np.array([0.1, 0.5, 1, np.pi])
|
||||
w2, mag, phase = dbode(system, w=w)
|
||||
jw = np.exp(w * 1j)
|
||||
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
|
||||
|
||||
# Test mag
|
||||
expected_mag = 20.0 * np.log10(abs(y))
|
||||
assert_almost_equal(mag, expected_mag)
|
||||
|
||||
# Test phase
|
||||
expected_phase = np.rad2deg(np.angle(y))
|
||||
assert_almost_equal(phase, expected_phase)
|
||||
|
||||
def test_range(self):
|
||||
        # Test that dbode() finds a reasonable frequency range.
|
||||
        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
dt = 0.1
|
||||
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
|
||||
n = 10
|
||||
        # Expected frequencies are n points spanning [0, pi), converted to rad/s by 1/dt.
|
||||
expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
|
||||
w, mag, phase = dbode(system, n=n)
|
||||
assert_almost_equal(w, expected_w)
|
||||
|
||||
def test_pole_one(self):
|
||||
        # Test that dbode() doesn't fail on a system with a pole at z = 1.
|
||||
        # discrete-time integrator, pole at z = 1: H(z) = 1 / (z - 1)
|
||||
system = TransferFunction([1], [1, -1], dt=0.1)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, message="divide by zero")
|
||||
sup.filter(RuntimeWarning, message="invalid value encountered")
|
||||
w, mag, phase = dbode(system, n=2)
|
||||
assert_equal(w[0], 0.) # a fail would give not-a-number
|
||||
|
||||
def test_imaginary(self):
|
||||
        # dbode() should not fail on a system with pure imaginary poles.
|
||||
# The test passes if bode doesn't raise an exception.
|
||||
system = TransferFunction([1], [1, 0, 100], dt=0.1)
|
||||
dbode(system, n=2)
|
||||
|
||||
def test_error(self):
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dbode, system)
|
||||
|
||||
|
||||
class TestTransferFunctionZConversion:
|
||||
"""Test private conversions between 'z' and 'z**-1' polynomials."""
|
||||
|
||||
def test_full(self):
|
||||
# Numerator and denominator same order
|
||||
num = [2, 3, 4]
|
||||
den = [5, 6, 7]
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
def test_numerator(self):
|
||||
# Numerator lower order than denominator
|
||||
num = [2, 3]
|
||||
den = [5, 6, 7]
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
assert_equal([0, 2, 3], num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
assert_equal([2, 3, 0], num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
def test_denominator(self):
|
||||
# Numerator higher order than denominator
|
||||
num = [2, 3, 4]
|
||||
den = [5, 6]
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal([0, 5, 6], den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal([5, 6, 0], den2)
|
||||
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,647 @@
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
|
||||
assert_equal, assert_,
|
||||
assert_allclose, assert_warns)
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy.fft import fft
|
||||
from scipy.special import sinc
|
||||
from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \
|
||||
firwin, firwin2, freqz, remez, firls, minimum_phase
|
||||
|
||||
|
||||
def test_kaiser_beta():
|
||||
b = kaiser_beta(58.7)
|
||||
assert_almost_equal(b, 0.1102 * 50.0)
|
||||
b = kaiser_beta(22.0)
|
||||
assert_almost_equal(b, 0.5842 + 0.07886)
|
||||
b = kaiser_beta(21.0)
|
||||
assert_equal(b, 0.0)
|
||||
b = kaiser_beta(10.0)
|
||||
assert_equal(b, 0.0)
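
# Added illustrative sketch, not part of the original test suite (not collected by
# pytest): kaiser_beta() implements Kaiser's empirical fit from the desired stopband
# attenuation in dB, which is what the spot checks above exercise -- 58.7 dB hits the
# first branch, 22 dB the second, and anything at or below 21 dB needs no taper.
def _kaiser_beta_reference(a):
    if a > 50:
        return 0.1102 * (a - 8.7)
    elif a > 21:
        return 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
    return 0.0


def _demo_kaiser_beta_reference():
    for a in (58.7, 22.0, 21.0, 10.0):
        assert_almost_equal(kaiser_beta(a), _kaiser_beta_reference(a))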
|
||||
|
||||
|
||||
def test_kaiser_atten():
|
||||
a = kaiser_atten(1, 1.0)
|
||||
assert_equal(a, 7.95)
|
||||
a = kaiser_atten(2, 1/np.pi)
|
||||
assert_equal(a, 2.285 + 7.95)
|
||||
|
||||
|
||||
def test_kaiserord():
|
||||
assert_raises(ValueError, kaiserord, 1.0, 1.0)
|
||||
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
|
||||
assert_equal((numtaps, beta), (2, 0.0))
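
# Added illustrative sketch, not part of the original test suite (not collected by
# pytest): kaiser_atten() and kaiserord() are two sides of the empirical relation
# atten = 2.285 * (numtaps - 1) * pi * width + 7.95, so inverting it and rounding
# up recovers the tap count kaiserord() returns, while going forward again meets
# or exceeds the requested attenuation.
def _demo_kaiserord_vs_kaiser_atten():
    width = 1 / np.pi
    for atten in (10.0, 65.0, 120.0):
        numtaps, _ = kaiserord(atten, width)
        numtaps_ref = int(np.ceil((atten - 7.95) / (2.285 * np.pi * width))) + 1
        assert_equal(numtaps, numtaps_ref)
        # the attenuation actually delivered is at least what was asked for
        assert_(kaiser_atten(numtaps, width) >= atten)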
|
||||
|
||||
|
||||
class TestFirwin:
|
||||
|
||||
def check_response(self, h, expected_response, tol=.05):
|
||||
N = len(h)
|
||||
alpha = 0.5 * (N-1)
|
||||
m = np.arange(0,N) - alpha # time indices of taps
|
||||
for freq, expected in expected_response:
|
||||
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
|
||||
mse = abs(actual-expected)**2
|
||||
assert_(mse < tol, f'response not as expected, mse={mse:g} > {tol:g}')
|
||||
|
||||
def test_response(self):
|
||||
N = 51
|
||||
f = .5
|
||||
# increase length just to try even/odd
|
||||
h = firwin(N, f) # low-pass from 0 to f
|
||||
self.check_response(h, [(.25,1), (.75,0)])
|
||||
|
||||
h = firwin(N+1, f, window='nuttall') # specific window
|
||||
self.check_response(h, [(.25,1), (.75,0)])
|
||||
|
||||
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
|
||||
self.check_response(h, [(.25,0), (.75,1)])
|
||||
|
||||
f1, f2, f3, f4 = .2, .4, .6, .8
|
||||
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
|
||||
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
|
||||
|
||||
h = firwin(N+4, [f1, f2]) # band-stop filter
|
||||
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
|
||||
|
||||
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
|
||||
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
|
||||
|
||||
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
|
||||
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
|
||||
|
||||
h = firwin(N+7, 0.1, width=.03) # low-pass
|
||||
self.check_response(h, [(.05,1), (.75,0)])
|
||||
|
||||
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
|
||||
self.check_response(h, [(.05,0), (.75,1)])
|
||||
|
||||
def mse(self, h, bands):
|
||||
"""Compute mean squared error versus ideal response across frequency
|
||||
band.
|
||||
h -- coefficients
|
||||
bands -- list of (left, right) tuples relative to 1==Nyquist of
|
||||
passbands
|
||||
"""
|
||||
w, H = freqz(h, worN=1024)
|
||||
f = w/np.pi
|
||||
passIndicator = np.zeros(len(w), bool)
|
||||
for left, right in bands:
|
||||
passIndicator |= (f >= left) & (f < right)
|
||||
Hideal = np.where(passIndicator, 1, 0)
|
||||
mse = np.mean(abs(abs(H)-Hideal)**2)
|
||||
return mse
|
||||
|
||||
def test_scaling(self):
|
||||
"""
|
||||
For one lowpass, bandpass, and highpass example filter, this test
|
||||
checks two things:
|
||||
- the mean squared error over the frequency domain of the unscaled
|
||||
filter is smaller than the scaled filter (true for rectangular
|
||||
window)
|
||||
- the response of the scaled filter is exactly unity at the center
|
||||
of the first passband
|
||||
"""
|
||||
N = 11
|
||||
cases = [
|
||||
([.5], True, (0, 1)),
|
||||
([0.2, .6], False, (.4, 1)),
|
||||
([.5], False, (1, 1)),
|
||||
]
|
||||
for cutoff, pass_zero, expected_response in cases:
|
||||
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
|
||||
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
|
||||
if len(cutoff) == 1:
|
||||
if pass_zero:
|
||||
cutoff = [0] + cutoff
|
||||
else:
|
||||
cutoff = cutoff + [1]
|
||||
assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
|
||||
'least squares violation')
|
||||
self.check_response(hs, [expected_response], 1e-12)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
firwin(51, .5, fs=np.array([10, 20]))
|
||||
|
||||
|
||||
class TestFirWinMore:
|
||||
"""Different author, different style, different tests..."""
|
||||
|
||||
def test_lowpass(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_highpass(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
|
||||
# Ensure that ntaps is odd.
|
||||
ntaps |= 1
|
||||
|
||||
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, pass_zero=False, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_bandpass(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, pass_zero=False, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
|
||||
0.7-width/2, 0.7+width/2, 0.8, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_bandstop_multi(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
|
||||
scale=False)
|
||||
taps = firwin(ntaps, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
|
||||
0.5-width/2, 0.5+width/2, 0.65,
|
||||
0.8-width/2, 0.8+width/2, 0.9, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
|
||||
decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_fs_nyq(self):
|
||||
"""Test the fs and nyq keywords."""
|
||||
nyquist = 1000
|
||||
width = 40.0
|
||||
relative_width = width/nyquist
|
||||
ntaps, beta = kaiserord(120, relative_width)
|
||||
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
|
||||
pass_zero=False, scale=False, fs=2*nyquist)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
|
||||
700-width/2, 700+width/2, 800, 1000])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
|
||||
|
||||
def test_bad_cutoff(self):
|
||||
"""Test that invalid cutoff argument raises ValueError."""
|
||||
# cutoff values must be greater than 0 and less than 1.
|
||||
assert_raises(ValueError, firwin, 99, -0.5)
|
||||
assert_raises(ValueError, firwin, 99, 1.5)
|
||||
# Don't allow 0 or 1 in cutoff.
|
||||
assert_raises(ValueError, firwin, 99, [0, 0.5])
|
||||
assert_raises(ValueError, firwin, 99, [0.5, 1])
|
||||
# cutoff values must be strictly increasing.
|
||||
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
|
||||
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
|
||||
# Must have at least one cutoff value.
|
||||
assert_raises(ValueError, firwin, 99, [])
|
||||
# 2D array not allowed.
|
||||
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
|
||||
# cutoff values must be less than nyq.
|
||||
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
|
||||
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
|
||||
|
||||
def test_even_highpass_raises_value_error(self):
|
||||
"""Test that attempt to create a highpass filter with an even number
|
||||
of taps raises a ValueError exception."""
|
||||
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
|
||||
assert_raises(ValueError, firwin, 40, [.25, 0.5])
|
||||
|
||||
def test_bad_pass_zero(self):
|
||||
"""Test degenerate pass_zero cases."""
|
||||
with assert_raises(ValueError, match='pass_zero must be'):
|
||||
firwin(41, 0.5, pass_zero='foo')
|
||||
with assert_raises(TypeError, match='cannot be interpreted'):
|
||||
firwin(41, 0.5, pass_zero=1.)
|
||||
for pass_zero in ('lowpass', 'highpass'):
|
||||
with assert_raises(ValueError, match='cutoff must have one'):
|
||||
firwin(41, [0.5, 0.6], pass_zero=pass_zero)
|
||||
for pass_zero in ('bandpass', 'bandstop'):
|
||||
with assert_raises(ValueError, match='must have at least two'):
|
||||
firwin(41, [0.5], pass_zero=pass_zero)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
firwin2(51, .5, 1, fs=np.array([10, 20]))
|
||||
|
||||
|
||||
class TestFirwin2:
|
||||
|
||||
def test_invalid_args(self):
|
||||
# `freq` and `gain` have different lengths.
|
||||
with assert_raises(ValueError, match='must be of same length'):
|
||||
firwin2(50, [0, 0.5, 1], [0.0, 1.0])
|
||||
# `nfreqs` is less than `ntaps`.
|
||||
with assert_raises(ValueError, match='ntaps must be less than nfreqs'):
|
||||
firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
|
||||
# Decreasing value in `freq`
|
||||
with assert_raises(ValueError, match='must be nondecreasing'):
|
||||
firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
|
||||
# Value in `freq` repeated more than once.
|
||||
with assert_raises(ValueError, match='must not occur more than twice'):
|
||||
firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0])
|
||||
# `freq` does not start at 0.0.
|
||||
with assert_raises(ValueError, match='start with 0'):
|
||||
firwin2(50, [0.5, 1.0], [0.0, 1.0])
|
||||
# `freq` does not end at fs/2.
|
||||
with assert_raises(ValueError, match='end with fs/2'):
|
||||
firwin2(50, [0.0, 0.5], [0.0, 1.0])
|
||||
# Value 0 is repeated in `freq`
|
||||
with assert_raises(ValueError, match='0 must not be repeated'):
|
||||
firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
# Value fs/2 is repeated in `freq`
|
||||
with assert_raises(ValueError, match='fs/2 must not be repeated'):
|
||||
firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
# Value in `freq` that is too close to a repeated number
|
||||
with assert_raises(ValueError, match='cannot contain numbers '
|
||||
'that are too close'):
|
||||
firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0],
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0])
|
||||
|
||||
# Type II filter, but the gain at nyquist frequency is not zero.
|
||||
with assert_raises(ValueError, match='Type II filter'):
|
||||
firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
|
||||
|
||||
# Type III filter, but the gains at nyquist and zero rate are not zero.
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True)
|
||||
|
||||
# Type IV filter, but the gain at zero rate is not zero.
|
||||
with assert_raises(ValueError, match='Type IV filter'):
|
||||
firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
|
||||
|
||||
def test01(self):
|
||||
width = 0.04
|
||||
beta = 12.0
|
||||
ntaps = 400
|
||||
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
|
||||
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
|
||||
freq = [0.0, 0.5, 1.0]
|
||||
gain = [1.0, 1.0, 0.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
|
||||
0.75, 1.0-width/2])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
|
||||
|
||||
def test02(self):
|
||||
width = 0.04
|
||||
beta = 12.0
|
||||
# ntaps must be odd for positive gain at Nyquist.
|
||||
ntaps = 401
|
||||
# An ideal highpass filter.
|
||||
freq = [0.0, 0.5, 0.5, 1.0]
|
||||
gain = [0.0, 0.0, 1.0, 1.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
|
||||
|
||||
def test03(self):
|
||||
width = 0.02
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
# ntaps must be odd for positive gain at Nyquist.
|
||||
ntaps = int(ntaps) | 1
|
||||
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
|
||||
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
|
||||
0.5-width, 0.5+width, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
|
||||
|
||||
def test04(self):
|
||||
"""Test firwin2 when window=None."""
|
||||
ntaps = 5
|
||||
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
|
||||
freq = [0.0, 0.5, 0.5, 1.0]
|
||||
gain = [1.0, 1.0, 0.0, 0.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
|
||||
alpha = 0.5 * (ntaps - 1)
|
||||
m = np.arange(0, ntaps) - alpha
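        # The requested response is an ideal brick wall with cutoff 0.5 in Nyquist
        # units, whose inverse DTFT is h[m] = 0.5 * sinc(0.5 * m); with window=None
        # firwin2 should recover these taps directly.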
|
||||
h = 0.5 * sinc(0.5 * m)
|
||||
assert_array_almost_equal(h, taps)
|
||||
|
||||
def test05(self):
|
||||
"""Test firwin2 for calculating Type IV filters"""
|
||||
ntaps = 1500
|
||||
|
||||
freq = [0.0, 1.0]
|
||||
gain = [0.0, 1.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
|
||||
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
|
||||
|
||||
freqs, response = freqz(taps, worN=2048)
|
||||
assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
|
||||
|
||||
def test06(self):
|
||||
"""Test firwin2 for calculating Type III filters"""
|
||||
ntaps = 1501
|
||||
|
||||
freq = [0.0, 0.5, 0.55, 1.0]
|
||||
gain = [0.0, 0.5, 0.0, 0.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
|
||||
assert_equal(taps[ntaps // 2], 0.0)
|
||||
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
|
||||
|
||||
freqs, response1 = freqz(taps, worN=2048)
|
||||
response2 = np.interp(freqs / np.pi, freq, gain)
|
||||
assert_array_almost_equal(abs(response1), response2, decimal=3)
|
||||
|
||||
def test_fs_nyq(self):
|
||||
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
|
||||
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
|
||||
def test_tuple(self):
|
||||
taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0))
|
||||
taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
|
||||
def test_input_modyfication(self):
|
||||
freq1 = np.array([0.0, 0.5, 0.5, 1.0])
|
||||
freq2 = np.array(freq1)
|
||||
firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0])
|
||||
assert_equal(freq1, freq2)
|
||||
|
||||
|
||||
class TestRemez:
|
||||
|
||||
def test_bad_args(self):
|
||||
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
|
||||
|
||||
def test_hilbert(self):
|
||||
N = 11 # number of taps in the filter
|
||||
a = 0.1 # width of the transition band
|
||||
|
||||
        # design a unity-gain Hilbert bandpass filter passing from a to 0.5-a
|
||||
h = remez(11, [a, 0.5-a], [1], type='hilbert')
|
||||
|
||||
# make sure the filter has correct # of taps
|
||||
assert_(len(h) == N, "Number of Taps")
|
||||
|
||||
# make sure it is type III (anti-symmetric tap coefficients)
|
||||
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
|
||||
|
||||
# Since the requested response is symmetric, all even coefficients
|
||||
# should be zero (or in this case really small)
|
||||
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
|
||||
|
||||
# now check the frequency response
|
||||
w, H = freqz(h, 1)
|
||||
f = w/2/np.pi
|
||||
Hmag = abs(H)
|
||||
|
||||
# should have a zero at 0 and pi (in this case close to zero)
|
||||
assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
|
||||
|
||||
# check that the pass band is close to unity
|
||||
idx = np.logical_and(f > a, f < 0.5-a)
|
||||
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
|
||||
|
||||
def test_compare(self):
|
||||
# test comparison to MATLAB
|
||||
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
|
||||
-0.003530911231040, 0.193140296954975, 0.373400753484939,
|
||||
0.373400753484939, 0.193140296954975, -0.003530911231040,
|
||||
-0.075943803756711, -0.041314581814658, 0.024590270518440]
|
||||
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
|
||||
assert_allclose(h, k)
|
||||
|
||||
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
|
||||
0.002879152556419, 0.016849978528150, -0.043276706138248,
|
||||
0.073641298245579, -0.103908158578635, 0.129770906801075,
|
||||
-0.147163447297124, 0.153302248456347, -0.147163447297124,
|
||||
0.129770906801075, -0.103908158578635, 0.073641298245579,
|
||||
-0.043276706138248, 0.016849978528150, 0.002879152556419,
|
||||
-0.014644062687875, 0.018704846485491, -0.038976016082299]
|
||||
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
remez(11, .1, 1, fs=np.array([10, 20]))
|
||||
|
||||
class TestFirls:
|
||||
|
||||
def test_bad_args(self):
|
||||
# even numtaps
|
||||
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
|
||||
# odd bands
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
|
||||
# len(bands) != len(desired)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
|
||||
# non-monotonic bands
|
||||
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
|
||||
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
|
||||
# negative desired
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
|
||||
# len(weight) != len(pairs)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[1, 2])
|
||||
# negative weight
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[-1])
|
||||
|
||||
def test_firls(self):
|
||||
N = 11 # number of taps in the filter
|
||||
a = 0.1 # width of the transition band
|
||||
|
||||
# design a halfband symmetric low-pass filter
|
||||
h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0)
|
||||
|
||||
# make sure the filter has correct # of taps
|
||||
assert_equal(len(h), N)
|
||||
|
||||
# make sure it is symmetric
|
||||
midx = (N-1) // 2
|
||||
assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
|
||||
|
||||
# make sure the center tap is 0.5
|
||||
assert_almost_equal(h[midx], 0.5)
|
||||
|
||||
# For halfband symmetric, odd coefficients (except the center)
|
||||
# should be zero (really small)
|
||||
hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
|
||||
assert_array_almost_equal(hodd, 0)
|
||||
|
||||
# now check the frequency response
|
||||
w, H = freqz(h, 1)
|
||||
f = w/2/np.pi
|
||||
Hmag = np.abs(H)
|
||||
|
||||
# check that the pass band is close to unity
|
||||
idx = np.logical_and(f > 0, f < a)
|
||||
assert_array_almost_equal(Hmag[idx], 1, decimal=3)
|
||||
|
||||
# check that the stop band is close to zero
|
||||
idx = np.logical_and(f > 0.5-a, f < 0.5)
|
||||
assert_array_almost_equal(Hmag[idx], 0, decimal=3)
|
||||
|
||||
def test_compare(self):
|
||||
# compare to OCTAVE output
|
||||
taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], weight=[1, 2])
|
||||
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
|
||||
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
|
||||
-9.81576747564301e-03, 3.17271686090449e-01,
|
||||
5.11409425599933e-01, 3.17271686090449e-01,
|
||||
-9.81576747564301e-03, -1.03354450635036e-01,
|
||||
-6.26930101730182e-04]
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
# compare to MATLAB output
|
||||
taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], weight=[1, 2])
|
||||
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
|
||||
known_taps = [
|
||||
0.058545300496815, -0.014233383714318, -0.104688258464392,
|
||||
0.012403323025279, 0.317930861136062, 0.488047220029700,
|
||||
0.317930861136062, 0.012403323025279, -0.104688258464392,
|
||||
-0.014233383714318, 0.058545300496815]
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
# With linear changes:
|
||||
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
|
||||
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
|
||||
known_taps = [
|
||||
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
|
||||
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
|
||||
1.156090832768218]
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
def test_rank_deficient(self):
|
||||
# solve() runs but warns (only sometimes, so here we don't use match)
|
||||
x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0])
|
||||
w, h = freqz(x, fs=2.)
|
||||
assert_allclose(np.abs(h[:2]), 1., atol=1e-5)
|
||||
assert_allclose(np.abs(h[-2:]), 0., atol=1e-6)
|
||||
# switch to pinvh (tolerances could be higher with longer
|
||||
# filters, but using shorter ones is faster computationally and
|
||||
# the idea is the same)
|
||||
x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0])
|
||||
w, h = freqz(x, fs=2.)
|
||||
mask = w < 0.01
|
||||
assert mask.sum() > 3
|
||||
assert_allclose(np.abs(h[mask]), 1., atol=1e-4)
|
||||
mask = w > 0.99
|
||||
assert mask.sum() > 3
|
||||
assert_allclose(np.abs(h[mask]), 0., atol=1e-4)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
firls(11, .1, 1, fs=np.array([10, 20]))
|
||||
|
||||
class TestMinimumPhase:
|
||||
|
||||
def test_bad_args(self):
|
||||
# not enough taps
|
||||
assert_raises(ValueError, minimum_phase, [1.])
|
||||
assert_raises(ValueError, minimum_phase, [1., 1.])
|
||||
assert_raises(ValueError, minimum_phase, np.full(10, 1j))
|
||||
assert_raises(ValueError, minimum_phase, 'foo')
|
||||
assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
|
||||
assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
|
||||
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
|
||||
with pytest.raises(ValueError, match="is only supported when"):
|
||||
minimum_phase(np.ones(3), method='hilbert', half=False)
|
||||
|
||||
def test_homomorphic(self):
|
||||
# check that it can recover frequency responses of arbitrary
|
||||
# linear-phase filters
|
||||
|
||||
# for some cases we can get the actual filter back
|
||||
h = [1, -1]
|
||||
h_new = minimum_phase(np.convolve(h, h[::-1]))
|
||||
assert_allclose(h_new, h, rtol=0.05)
|
||||
|
||||
# but in general we only guarantee we get the magnitude back
|
||||
rng = np.random.RandomState(0)
|
||||
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
|
||||
h = rng.randn(n)
|
||||
h_linear = np.convolve(h, h[::-1])
|
||||
h_new = minimum_phase(h_linear)
|
||||
assert_allclose(np.abs(fft(h_new)), np.abs(fft(h)), rtol=1e-4)
|
||||
h_new = minimum_phase(h_linear, half=False)
|
||||
assert len(h_linear) == len(h_new)
|
||||
assert_allclose(np.abs(fft(h_new)), np.abs(fft(h_linear)), rtol=1e-4)
|
||||
|
||||
def test_hilbert(self):
|
||||
# compare to MATLAB output of reference implementation
|
||||
|
||||
# f=[0 0.3 0.5 1];
|
||||
# a=[1 1 0 0];
|
||||
# h=remez(11,f,a);
|
||||
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
|
||||
k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
|
||||
0.077152207480935, -0.129943946349364, -0.059355880509749]
|
||||
m = minimum_phase(h, 'hilbert')
|
||||
assert_allclose(m, k, rtol=5e-3)
|
||||
|
||||
# f=[0 0.8 0.9 1];
|
||||
# a=[0 0 1 1];
|
||||
# h=remez(20,f,a);
|
||||
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
|
||||
k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
|
||||
-0.157957283165866, 0.151739294892963, -0.129293146705090,
|
||||
0.100787844523204, -0.065832656741252, 0.035361328741024,
|
||||
-0.014977068692269, -0.158416139047557]
|
||||
m = minimum_phase(h, 'hilbert', n_fft=2**19)
|
||||
assert_allclose(m, k, rtol=2e-3)
|
||||
venv/lib/python3.12/site-packages/scipy/signal/tests/test_ltisys.py (1221 lines): file diff suppressed because it is too large
@@ -0,0 +1,65 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_allclose, assert_array_equal
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from numpy.fft import fft, ifft
|
||||
|
||||
from scipy.signal import max_len_seq
|
||||
|
||||
|
||||
class TestMLS:
|
||||
|
||||
def test_mls_inputs(self):
|
||||
# can't all be zero state
|
||||
assert_raises(ValueError, max_len_seq,
|
||||
10, state=np.zeros(10))
|
||||
# wrong size state
|
||||
assert_raises(ValueError, max_len_seq, 10,
|
||||
state=np.ones(3))
|
||||
# wrong length
|
||||
assert_raises(ValueError, max_len_seq, 10, length=-1)
|
||||
assert_array_equal(max_len_seq(10, length=0)[0], [])
|
||||
# unknown taps
|
||||
assert_raises(ValueError, max_len_seq, 64)
|
||||
# bad taps
|
||||
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
|
||||
|
||||
def test_mls_output(self):
|
||||
# define some alternate working taps
|
||||
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
|
||||
8: [7, 5, 3]}
|
||||
# assume the other bit levels work, too slow to test higher orders...
|
||||
for nbits in range(2, 8):
|
||||
for state in [None, np.round(np.random.rand(nbits))]:
|
||||
for taps in [None, alt_taps[nbits]]:
|
||||
if state is not None and np.all(state == 0):
|
||||
state[0] = 1 # they can't all be zero
|
||||
orig_m = max_len_seq(nbits, state=state,
|
||||
taps=taps)[0]
|
||||
m = 2. * orig_m - 1. # convert to +/- 1 representation
|
||||
                    # First, make sure we got all 1's or -1's
|
||||
err_msg = "mls had non binary terms"
|
||||
assert_array_equal(np.abs(m), np.ones_like(m),
|
||||
err_msg=err_msg)
|
||||
# Test via circular cross-correlation, which is just mult.
|
||||
# in the frequency domain with one signal conjugated
|
||||
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
|
||||
out_len = 2**nbits - 1
|
||||
                    # impulse amplitude == out_len
|
||||
err_msg = "mls impulse has incorrect value"
|
||||
assert_allclose(tester[0], out_len, err_msg=err_msg)
|
||||
# steady-state is -1
|
||||
err_msg = "mls steady-state has incorrect value"
|
||||
assert_allclose(tester[1:], np.full(out_len - 1, -1),
|
||||
err_msg=err_msg)
|
||||
# let's do the split thing using a couple options
|
||||
for n in (1, 2**(nbits - 1)):
|
||||
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
|
||||
length=n)
|
||||
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
|
||||
length=1)
|
||||
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
|
||||
length=out_len - n - 1)
|
||||
new_m = np.concatenate((m1, m2, m3))
|
||||
assert_array_equal(orig_m, new_m)
|
||||
|
||||
@@ -0,0 +1,891 @@
|
||||
import copy
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_,
|
||||
assert_equal,
|
||||
assert_allclose,
|
||||
assert_array_equal
|
||||
)
|
||||
import pytest
|
||||
from pytest import raises, warns
|
||||
|
||||
from scipy.signal._peak_finding import (
|
||||
argrelmax,
|
||||
argrelmin,
|
||||
peak_prominences,
|
||||
peak_widths,
|
||||
_unpack_condition_args,
|
||||
find_peaks,
|
||||
find_peaks_cwt,
|
||||
_identify_ridge_lines
|
||||
)
|
||||
from scipy.signal.windows import gaussian
|
||||
from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
|
||||
|
||||
|
||||
def _gen_gaussians(center_locs, sigmas, total_length):
|
||||
xdata = np.arange(0, total_length).astype(float)
|
||||
out_data = np.zeros(total_length, dtype=float)
|
||||
for ind, sigma in enumerate(sigmas):
|
||||
tmp = (xdata - center_locs[ind]) / sigma
|
||||
out_data += np.exp(-(tmp**2))
|
||||
return out_data
|
||||
|
||||
|
||||
def _gen_gaussians_even(sigmas, total_length):
|
||||
num_peaks = len(sigmas)
|
||||
delta = total_length / (num_peaks + 1)
|
||||
center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
|
||||
out_data = _gen_gaussians(center_locs, sigmas, total_length)
|
||||
return out_data, center_locs
|
||||
|
||||
|
||||
def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
|
||||
"""
|
||||
Generate coordinates for a ridge line.
|
||||
|
||||
    Will be a series of coordinates, starting at start_loc (length 2).
|
||||
The maximum distance between any adjacent columns will be
|
||||
`max_distance`, the max distance between adjacent rows
|
||||
    will be `max_gap`.
|
||||
|
||||
`max_locs` should be the size of the intended matrix. The
|
||||
ending coordinates are guaranteed to be less than `max_locs`,
|
||||
although they may not approach `max_locs` at all.
|
||||
"""
|
||||
|
||||
def keep_bounds(num, max_val):
|
||||
out = max(num, 0)
|
||||
out = min(out, max_val)
|
||||
return out
|
||||
|
||||
gaps = copy.deepcopy(gaps)
|
||||
distances = copy.deepcopy(distances)
|
||||
|
||||
locs = np.zeros([length, 2], dtype=int)
|
||||
locs[0, :] = start_locs
|
||||
total_length = max_locs[0] - start_locs[0] - sum(gaps)
|
||||
if total_length < length:
|
||||
raise ValueError('Cannot generate ridge line according to constraints')
|
||||
dist_int = length / len(distances) - 1
|
||||
gap_int = length / len(gaps) - 1
|
||||
for ind in range(1, length):
|
||||
nextcol = locs[ind - 1, 1]
|
||||
nextrow = locs[ind - 1, 0] + 1
|
||||
if (ind % dist_int == 0) and (len(distances) > 0):
|
||||
nextcol += ((-1)**ind)*distances.pop()
|
||||
if (ind % gap_int == 0) and (len(gaps) > 0):
|
||||
nextrow += gaps.pop()
|
||||
nextrow = keep_bounds(nextrow, max_locs[0])
|
||||
nextcol = keep_bounds(nextcol, max_locs[1])
|
||||
locs[ind, :] = [nextrow, nextcol]
|
||||
|
||||
return [locs[:, 0], locs[:, 1]]
|
||||
|
||||
|
||||
class TestLocalMaxima1d:
|
||||
|
||||
def test_empty(self):
|
||||
"""Test with empty signal."""
|
||||
x = np.array([], dtype=np.float64)
|
||||
for array in _local_maxima_1d(x):
|
||||
assert_equal(array, np.array([]))
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_linear(self):
|
||||
"""Test with linear signal."""
|
||||
x = np.linspace(0, 100)
|
||||
for array in _local_maxima_1d(x):
|
||||
assert_equal(array, np.array([]))
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_simple(self):
|
||||
"""Test with simple signal."""
|
||||
x = np.linspace(-10, 10, 50)
|
||||
x[2::3] += 1
|
||||
expected = np.arange(2, 50, 3)
|
||||
for array in _local_maxima_1d(x):
|
||||
# For plateaus of size 1, the edges are identical with the
|
||||
# midpoints
|
||||
assert_equal(array, expected)
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_flat_maxima(self):
|
||||
"""Test if flat maxima are detected correctly."""
|
||||
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
|
||||
-5, -5, -5, -5, -5, -10])
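        # A plateau reports its first and last sample as left/right edge and the
        # (rounded-down) center as the midpoint, e.g. the run of four 4's at
        # indices 11..14 gives left edge 11, right edge 14 and midpoint 12.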
|
||||
midpoints, left_edges, right_edges = _local_maxima_1d(x)
|
||||
assert_equal(midpoints, np.array([2, 4, 8, 12, 18]))
|
||||
assert_equal(left_edges, np.array([2, 4, 7, 11, 16]))
|
||||
assert_equal(right_edges, np.array([2, 5, 9, 14, 20]))
|
||||
|
||||
@pytest.mark.parametrize('x', [
|
||||
np.array([1., 0, 2]),
|
||||
np.array([3., 3, 0, 4, 4]),
|
||||
np.array([5., 5, 5, 0, 6, 6, 6]),
|
||||
])
|
||||
def test_signal_edges(self, x):
|
||||
"""Test if behavior on signal edges is correct."""
|
||||
for array in _local_maxima_1d(x):
|
||||
assert_equal(array, np.array([]))
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_exceptions(self):
|
||||
"""Test input validation and raised exceptions."""
|
||||
with raises(ValueError, match="wrong number of dimensions"):
|
||||
_local_maxima_1d(np.ones((1, 1)))
|
||||
with raises(ValueError, match="expected 'const float64_t'"):
|
||||
_local_maxima_1d(np.ones(1, dtype=int))
|
||||
with raises(TypeError, match="list"):
|
||||
_local_maxima_1d([1., 2.])
|
||||
with raises(TypeError, match="'x' must not be None"):
|
||||
_local_maxima_1d(None)
|
||||
|
||||
|
||||
class TestRidgeLines:
|
||||
|
||||
def test_empty(self):
|
||||
test_matr = np.zeros([20, 100])
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert_(len(lines) == 0)
|
||||
|
||||
def test_minimal(self):
|
||||
test_matr = np.zeros([20, 100])
|
||||
test_matr[0, 10] = 1
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert_(len(lines) == 1)
|
||||
|
||||
test_matr = np.zeros([20, 100])
|
||||
test_matr[0:2, 10] = 1
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert_(len(lines) == 1)
|
||||
|
||||
def test_single_pass(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
gaps = [0, 1, 2, 0, 1]
|
||||
test_matr = np.zeros([20, 50]) + 1e-12
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_distances = np.full(20, max(distances))
|
||||
identified_lines = _identify_ridge_lines(test_matr,
|
||||
max_distances,
|
||||
max(gaps) + 1)
|
||||
assert_array_equal(identified_lines, [line])
|
||||
|
||||
def test_single_bigdist(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
gaps = [0, 1, 2, 4]
|
||||
test_matr = np.zeros([20, 50])
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 3
|
||||
max_distances = np.full(20, max_dist)
|
||||
#This should get 2 lines, since the distance is too large
|
||||
identified_lines = _identify_ridge_lines(test_matr,
|
||||
max_distances,
|
||||
max(gaps) + 1)
|
||||
assert_(len(identified_lines) == 2)
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
def test_single_biggap(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
max_gap = 3
|
||||
gaps = [0, 4, 2, 1]
|
||||
test_matr = np.zeros([20, 50])
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 6
|
||||
max_distances = np.full(20, max_dist)
|
||||
#This should get 2 lines, since the gap is too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
|
||||
assert_(len(identified_lines) == 2)
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
def test_single_biggaps(self):
|
||||
distances = [0]
|
||||
max_gap = 1
|
||||
gaps = [3, 6]
|
||||
test_matr = np.zeros([50, 50])
|
||||
length = 30
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 1
|
||||
max_distances = np.full(50, max_dist)
|
||||
#This should get 3 lines, since the gaps are too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
|
||||
assert_(len(identified_lines) == 3)
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
|
||||
class TestArgrel:
|
||||
|
||||
def test_empty(self):
|
||||
# Regression test for gh-2832.
|
||||
# When there are no relative extrema, make sure that
|
||||
# the number of empty arrays returned matches the
|
||||
# dimension of the input.
|
||||
|
||||
empty_array = np.array([], dtype=int)
|
||||
|
||||
z1 = np.zeros(5)
|
||||
|
||||
i = argrelmin(z1)
|
||||
assert_equal(len(i), 1)
|
||||
assert_array_equal(i[0], empty_array)
|
||||
|
||||
z2 = np.zeros((3,5))
|
||||
|
||||
row, col = argrelmin(z2, axis=0)
|
||||
assert_array_equal(row, empty_array)
|
||||
assert_array_equal(col, empty_array)
|
||||
|
||||
row, col = argrelmin(z2, axis=1)
|
||||
assert_array_equal(row, empty_array)
|
||||
assert_array_equal(col, empty_array)
|
||||
|
||||
def test_basic(self):
|
||||
# Note: the docstrings for the argrel{min,max,extrema} functions
|
||||
# do not give a guarantee of the order of the indices, so we'll
|
||||
# sort them before testing.
|
||||
|
||||
x = np.array([[1, 2, 2, 3, 2],
|
||||
[2, 1, 2, 2, 3],
|
||||
[3, 2, 1, 2, 2],
|
||||
[2, 3, 2, 1, 2],
|
||||
[1, 2, 3, 2, 1]])
|
||||
|
||||
row, col = argrelmax(x, axis=0)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [1, 2, 3])
|
||||
assert_equal(col[order], [4, 0, 1])
|
||||
|
||||
row, col = argrelmax(x, axis=1)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [0, 3, 4])
|
||||
assert_equal(col[order], [3, 1, 2])
|
||||
|
||||
row, col = argrelmin(x, axis=0)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [1, 2, 3])
|
||||
assert_equal(col[order], [1, 2, 3])
|
||||
|
||||
row, col = argrelmin(x, axis=1)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [1, 2, 3])
|
||||
assert_equal(col[order], [1, 2, 3])
|
||||
|
||||
def test_highorder(self):
|
||||
order = 2
|
||||
sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, 500)
|
||||
test_data[act_locs + order] = test_data[act_locs]*0.99999
|
||||
test_data[act_locs - order] = test_data[act_locs]*0.99999
|
||||
rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
|
||||
|
||||
assert_(len(rel_max_locs) == len(act_locs))
|
||||
assert_((rel_max_locs == act_locs).all())
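# Illustrative sketch (not part of the original test class): the effect of
# `order` -- a sample must exceed every neighbour within `order` points on
# each side, so a larger `order` suppresses the smaller local maximum.
def _sketch_order_effect(self):
    y = np.array([0., 2., 1.5, 3., 1., 0.5, 0.])
    assert_equal(argrelmax(y, order=1)[0], [1, 3])
    assert_equal(argrelmax(y, order=2)[0], [3])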
|
||||
|
||||
def test_2d_gaussians(self):
|
||||
sigmas = [1.0, 2.0, 10.0]
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, 100)
|
||||
rot_factor = 20
|
||||
rot_range = np.arange(0, len(test_data)) - rot_factor
|
||||
test_data_2 = np.vstack([test_data, test_data[rot_range]])
|
||||
rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
|
||||
|
||||
for rw in range(0, test_data_2.shape[0]):
|
||||
inds = (rel_max_rows == rw)
|
||||
|
||||
assert_(len(rel_max_cols[inds]) == len(act_locs))
|
||||
assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all())
|
||||
|
||||
|
||||
class TestPeakProminences:
|
||||
|
||||
def test_empty(self):
|
||||
"""
|
||||
Test if an empty array is returned if no peaks are provided.
|
||||
"""
|
||||
out = peak_prominences([1, 2, 3], [])
|
||||
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
|
||||
assert_(arr.size == 0)
|
||||
assert_(arr.dtype == dtype)
|
||||
|
||||
out = peak_prominences([], [])
|
||||
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
|
||||
assert_(arr.size == 0)
|
||||
assert_(arr.dtype == dtype)
|
||||
|
||||
def test_basic(self):
|
||||
"""
|
||||
Test if height of prominences is correctly calculated in signal with
|
||||
rising baseline (peak widths are 1 sample).
|
||||
"""
|
||||
# Prepare basic signal
|
||||
x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
|
||||
peaks = np.array([1, 2, 4, 6])
|
||||
lbases = np.array([0, 0, 0, 5])
|
||||
rbases = np.array([3, 3, 5, 7])
|
||||
proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
|
||||
# Test if calculation matches handcrafted result
|
||||
out = peak_prominences(x, peaks)
|
||||
assert_equal(out[0], proms)
|
||||
assert_equal(out[1], lbases)
|
||||
assert_equal(out[2], rbases)
|
||||
|
||||
def test_edge_cases(self):
|
||||
"""
|
||||
Test edge cases.
|
||||
"""
|
||||
# Peaks have same height, prominence and bases
|
||||
x = [0, 2, 1, 2, 1, 2, 0]
|
||||
peaks = [1, 3, 5]
|
||||
proms, lbases, rbases = peak_prominences(x, peaks)
|
||||
assert_equal(proms, [2, 2, 2])
|
||||
assert_equal(lbases, [0, 0, 0])
|
||||
assert_equal(rbases, [6, 6, 6])
|
||||
|
||||
# Peaks have same height & prominence but different bases
|
||||
x = [0, 1, 0, 1, 0, 1, 0]
|
||||
peaks = np.array([1, 3, 5])
|
||||
proms, lbases, rbases = peak_prominences(x, peaks)
|
||||
assert_equal(proms, [1, 1, 1])
|
||||
assert_equal(lbases, peaks - 1)
|
||||
assert_equal(rbases, peaks + 1)
|
||||
|
||||
def test_non_contiguous(self):
|
||||
"""
|
||||
Test with non-C-contiguous input arrays.
|
||||
"""
|
||||
x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
|
||||
peaks = np.repeat([1, 2, 4], 2)
|
||||
proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
|
||||
assert_equal(proms, [9, 9, 2])
|
||||
assert_equal(lbases, [0, 0, 3])
|
||||
assert_equal(rbases, [3, 3, 5])
|
||||
|
||||
def test_wlen(self):
|
||||
"""
|
||||
Test if wlen actually shrinks the evaluation range correctly.
|
||||
"""
|
||||
x = [0, 1, 2, 3, 1, 0, -1]
|
||||
peak = [3]
|
||||
# Test rounding behavior of wlen
|
||||
assert_equal(peak_prominences(x, peak), [3., 0, 6])
|
||||
for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
|
||||
assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i])
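# Note (reading of the loop above, not from the original file): `wlen` is
# effectively rounded up to the next odd integer so the peak stays centred;
# e.g. wlen=3.2 behaves like wlen=5 and gives
# (array([2.]), array([1]), array([5])) for this signal.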
|
||||
|
||||
def test_exceptions(self):
|
||||
"""
|
||||
Verify that exceptions and warnings are raised.
|
||||
"""
|
||||
# x with dimension > 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences([[0, 1, 1, 0]], [1, 2])
|
||||
# peaks with dimension > 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences([0, 1, 1, 0], [[1, 2]])
|
||||
# x with dimension < 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences(3, [0,])
|
||||
|
||||
# empty x with peaks supplied
with raises(ValueError, match='not a valid index'):
|
||||
peak_prominences([], [0])
|
||||
# invalid indices with non-empty x
|
||||
for p in [-100, -1, 3, 1000]:
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
peak_prominences([1, 0, 2], [p])
|
||||
|
||||
# peaks is not cast-able to np.intp
|
||||
with raises(TypeError, match='cannot safely cast'):
|
||||
peak_prominences([0, 1, 1, 0], [1.1, 2.3])
|
||||
|
||||
# wlen < 3
|
||||
with raises(ValueError, match='wlen'):
|
||||
peak_prominences(np.arange(10), [3, 5], wlen=1)
|
||||
|
||||
def test_warnings(self):
|
||||
"""
|
||||
Verify that appropriate warnings are raised.
|
||||
"""
|
||||
msg = "some peaks have a prominence of 0"
|
||||
for p in [0, 1, 2]:
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
peak_prominences([1, 0, 2], [p,])
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
peak_prominences([0, 1, 1, 1, 0], [2], wlen=2)
|
||||
|
||||
|
||||
class TestPeakWidths:
|
||||
|
||||
def test_empty(self):
|
||||
"""
|
||||
Test if an empty array is returned if no peaks are provided.
|
||||
"""
|
||||
widths = peak_widths([], [])[0]
|
||||
assert_(isinstance(widths, np.ndarray))
|
||||
assert_equal(widths.size, 0)
|
||||
widths = peak_widths([1, 2, 3], [])[0]
|
||||
assert_(isinstance(widths, np.ndarray))
|
||||
assert_equal(widths.size, 0)
|
||||
out = peak_widths([], [])
|
||||
for arr in out:
|
||||
assert_(isinstance(arr, np.ndarray))
|
||||
assert_equal(arr.size, 0)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
|
||||
def test_basic(self):
|
||||
"""
|
||||
Test a simple use case with easy to verify results at different relative
|
||||
heights.
|
||||
"""
|
||||
x = np.array([1, 0, 1, 2, 1, 0, -1])
|
||||
prominence = 2
|
||||
for rel_height, width_true, lip_true, rip_true in [
|
||||
(0., 0., 3., 3.), # raises warning
|
||||
(0.25, 1., 2.5, 3.5),
|
||||
(0.5, 2., 2., 4.),
|
||||
(0.75, 3., 1.5, 4.5),
|
||||
(1., 4., 1., 5.),
|
||||
(2., 5., 1., 6.),
|
||||
(3., 5., 1., 6.)
|
||||
]:
|
||||
width_calc, height, lip_calc, rip_calc = peak_widths(
|
||||
x, [3], rel_height)
|
||||
assert_allclose(width_calc, width_true)
|
||||
assert_allclose(height, 2 - rel_height * prominence)
|
||||
assert_allclose(lip_calc, lip_true)
|
||||
assert_allclose(rip_calc, rip_true)
|
||||
|
||||
def test_non_contiguous(self):
|
||||
"""
|
||||
Test with non-C-contiguous input arrays.
|
||||
"""
|
||||
x = np.repeat([0, 100, 50], 4)
|
||||
peaks = np.repeat([1], 3)
|
||||
result = peak_widths(x[::4], peaks[::3])
|
||||
assert_equal(result, [0.75, 75, 0.75, 1.5])
|
||||
|
||||
def test_exceptions(self):
|
||||
"""
|
||||
Verify that argument validation works as intended.
|
||||
"""
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# x with dimension > 1
|
||||
peak_widths(np.zeros((3, 4)), np.ones(3))
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# x with dimension < 1
|
||||
peak_widths(3, [0])
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# peaks with dimension > 1
|
||||
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# peaks with dimension < 1
|
||||
peak_widths(np.arange(10), 3)
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
# peak pos exceeds x.size
|
||||
peak_widths(np.arange(10), [8, 11])
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
# empty x with peaks supplied
|
||||
peak_widths([], [1, 2])
|
||||
with raises(TypeError, match='cannot safely cast'):
|
||||
# peak cannot be safely casted to intp
|
||||
peak_widths(np.arange(10), [1.1, 2.3])
|
||||
with raises(ValueError, match='rel_height'):
|
||||
# rel_height is < 0
|
||||
peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
|
||||
with raises(TypeError, match='None'):
|
||||
# prominence data contains None
|
||||
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
|
||||
|
||||
def test_warnings(self):
|
||||
"""
|
||||
Verify that appropriate warnings are raised.
|
||||
"""
|
||||
msg = "some peaks have a width of 0"
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
# Case: rel_height is 0
|
||||
peak_widths([0, 1, 0], [1], rel_height=0)
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
# Case: prominence is 0 and bases are identical
|
||||
peak_widths(
|
||||
[0, 1, 1, 1, 0], [2],
|
||||
prominence_data=(np.array([0.], np.float64),
|
||||
np.array([2], np.intp),
|
||||
np.array([2], np.intp))
|
||||
)
|
||||
|
||||
def test_mismatching_prominence_data(self):
|
||||
"""Test with mismatching peak and / or prominence data."""
|
||||
x = [0, 1, 0]
|
||||
peak = [1]
|
||||
for i, (prominences, left_bases, right_bases) in enumerate([
|
||||
((1.,), (-1,), (2,)), # left base not in x
|
||||
((1.,), (0,), (3,)), # right base not in x
|
||||
((1.,), (2,), (0,)), # swapped bases same as peak
|
||||
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
|
||||
((1., 1.), (0,), (2,)), # arrays with different shapes
|
||||
((1.,), (0, 0), (2,)), # arrays with different shapes
|
||||
((1.,), (0,), (2, 2)) # arrays with different shapes
|
||||
]):
|
||||
# Make sure input matches output of signal.peak_prominences
prominence_data = (np.array(prominences, dtype=np.float64),
|
||||
np.array(left_bases, dtype=np.intp),
|
||||
np.array(right_bases, dtype=np.intp))
|
||||
# Test for correct exception
|
||||
if i < 3:
|
||||
match = "prominence data is invalid for peak"
|
||||
else:
|
||||
match = "arrays in `prominence_data` must have the same shape"
|
||||
with raises(ValueError, match=match):
|
||||
peak_widths(x, peak, prominence_data=prominence_data)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
|
||||
def test_intersection_rules(self):
|
||||
"""Test if x == eval_height counts as an intersection."""
|
||||
# Flat peak with two possible intersection points if evaluated at 1
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
|
||||
# relative height is 0 -> width is 0 as well, raises warning
|
||||
assert_allclose(peak_widths(x, peaks=[5], rel_height=0),
|
||||
[(0.,), (3.,), (5.,), (5.,)])
|
||||
# width_height == x counts as intersection -> nearest 1 is chosen
|
||||
assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3),
|
||||
[(4.,), (1.,), (3.,), (7.,)])
|
||||
|
||||
|
||||
def test_unpack_condition_args():
|
||||
"""
|
||||
Verify parsing of condition arguments for the `scipy.signal.find_peaks` function.
|
||||
"""
|
||||
x = np.arange(10)
|
||||
amin_true = x
|
||||
amax_true = amin_true + 10
|
||||
peaks = amin_true[1::2]
|
||||
|
||||
# Test unpacking with None or interval
|
||||
assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
|
||||
assert_((1, None) == _unpack_condition_args(1, x, peaks))
|
||||
assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
|
||||
assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
|
||||
assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))
|
||||
|
||||
# Test if borders are correctly reduced with `peaks`
|
||||
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
|
||||
assert_equal(amin_calc, amin_true[peaks])
|
||||
assert_equal(amax_calc, amax_true[peaks])
|
||||
|
||||
# Test raises if array borders don't match x
|
||||
with raises(ValueError, match="array size of lower"):
|
||||
_unpack_condition_args(amin_true, np.arange(11), peaks)
|
||||
with raises(ValueError, match="array size of upper"):
|
||||
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
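# Illustrative sketch (not from the original file): the interval conventions
# parsed above, as seen through the public `find_peaks` API; the signal and
# helper name are made up for demonstration only.
def _example_interval_conditions():
    x = np.array([0., 2., 0., 3., 0., 4., 0.])
    peaks_all, _ = find_peaks(x, height=(None, None))  # every local maximum
    peaks_min, _ = find_peaks(x, height=2.5)            # scalar = lower bound
    peaks_band, _ = find_peaks(x, height=(2.5, 3.5))    # (lower, upper) interval
    assert_equal(peaks_all, [1, 3, 5])
    assert_equal(peaks_min, [3, 5])
    assert_equal(peaks_band, [3])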
|
||||
|
||||
|
||||
class TestFindPeaks:
|
||||
|
||||
# Keys of optionally returned properties
|
||||
property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
|
||||
'prominences', 'left_bases', 'right_bases', 'widths',
|
||||
'width_heights', 'left_ips', 'right_ips'}
|
||||
|
||||
def test_constant(self):
|
||||
"""
|
||||
Test behavior for signal without local maxima.
|
||||
"""
|
||||
open_interval = (None, None)
|
||||
peaks, props = find_peaks(np.ones(10),
|
||||
height=open_interval, threshold=open_interval,
|
||||
prominence=open_interval, width=open_interval)
|
||||
assert_(peaks.size == 0)
|
||||
for key in self.property_keys:
|
||||
assert_(props[key].size == 0)
|
||||
|
||||
def test_plateau_size(self):
|
||||
"""
|
||||
Test plateau size condition for peaks.
|
||||
"""
|
||||
# Prepare signal with peaks whose peak_height == plateau_size
|
||||
plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111])
|
||||
x = np.zeros(plateau_sizes.size * 2 + 1)
|
||||
x[1::2] = plateau_sizes
|
||||
repeats = np.ones(x.size, dtype=int)
|
||||
repeats[1::2] = x[1::2]
|
||||
x = np.repeat(x, repeats)
|
||||
|
||||
# Test full output
|
||||
peaks, props = find_peaks(x, plateau_size=(None, None))
|
||||
assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100])
|
||||
assert_equal(props["plateau_sizes"], plateau_sizes)
|
||||
assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2)
|
||||
assert_equal(props["right_edges"], peaks + plateau_sizes // 2)
|
||||
|
||||
# Test conditions
|
||||
assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100])
|
||||
assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7])
|
||||
assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33])
|
||||
|
||||
def test_height_condition(self):
|
||||
"""
|
||||
Test height condition for peaks.
|
||||
"""
|
||||
x = (0., 1/3, 0., 2.5, 0, 4., 0)
|
||||
peaks, props = find_peaks(x, height=(None, None))
|
||||
assert_equal(peaks, np.array([1, 3, 5]))
|
||||
assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]))
|
||||
assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]))
|
||||
assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]))
|
||||
assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]))
|
||||
|
||||
def test_threshold_condition(self):
|
||||
"""
|
||||
Test threshold condition for peaks.
|
||||
"""
|
||||
x = (0, 2, 1, 4, -1)
|
||||
peaks, props = find_peaks(x, threshold=(None, None))
|
||||
assert_equal(peaks, np.array([1, 3]))
|
||||
assert_equal(props['left_thresholds'], np.array([2, 3]))
|
||||
assert_equal(props['right_thresholds'], np.array([1, 5]))
|
||||
assert_equal(find_peaks(x, threshold=2)[0], np.array([3]))
|
||||
assert_equal(find_peaks(x, threshold=3.5)[0], np.array([]))
|
||||
assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]))
|
||||
assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]))
|
||||
assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([]))
|
||||
|
||||
def test_distance_condition(self):
|
||||
"""
|
||||
Test distance condition for peaks.
|
||||
"""
|
||||
# Peaks of different height with constant distance 3
|
||||
peaks_all = np.arange(1, 21, 3)
|
||||
x = np.zeros(21)
|
||||
x[peaks_all] += np.linspace(1, 2, peaks_all.size)
|
||||
|
||||
# Test if peaks with "minimal" distance are still selected (distance = 3)
|
||||
assert_equal(find_peaks(x, distance=3)[0], peaks_all)
|
||||
|
||||
# Select every second peak (distance > 3)
|
||||
peaks_subset = find_peaks(x, distance=3.0001)[0]
|
||||
# Test if peaks_subset is subset of peaks_all
|
||||
assert_(
|
||||
np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
|
||||
)
|
||||
# Test if every second peak was removed
|
||||
assert_equal(np.diff(peaks_subset), 6)
|
||||
|
||||
# Test priority of peak removal
|
||||
x = [-2, 1, -1, 0, -3]
|
||||
peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size
|
||||
assert_(peaks_subset.size == 1 and peaks_subset[0] == 1)
|
||||
|
||||
def test_prominence_condition(self):
|
||||
"""
|
||||
Test prominence condition for peaks.
|
||||
"""
|
||||
x = np.linspace(0, 10, 100)
|
||||
peaks_true = np.arange(1, 99, 2)
|
||||
offset = np.linspace(1, 10, peaks_true.size)
|
||||
x[peaks_true] += offset
|
||||
prominences = x[peaks_true] - x[peaks_true + 1]
|
||||
interval = (3, 9)
|
||||
keep = np.nonzero(
|
||||
(interval[0] <= prominences) & (prominences <= interval[1]))
|
||||
|
||||
peaks_calc, properties = find_peaks(x, prominence=interval)
|
||||
assert_equal(peaks_calc, peaks_true[keep])
|
||||
assert_equal(properties['prominences'], prominences[keep])
|
||||
assert_equal(properties['left_bases'], 0)
|
||||
assert_equal(properties['right_bases'], peaks_true[keep] + 1)
|
||||
|
||||
def test_width_condition(self):
|
||||
"""
|
||||
Test width condition for peaks.
|
||||
"""
|
||||
x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
|
||||
peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
|
||||
assert_equal(peaks.size, 1)
|
||||
assert_equal(peaks, 7)
|
||||
assert_allclose(props['widths'], 1.35)
|
||||
assert_allclose(props['width_heights'], 1.)
|
||||
assert_allclose(props['left_ips'], 6.4)
|
||||
assert_allclose(props['right_ips'], 7.75)
|
||||
|
||||
def test_properties(self):
|
||||
"""
|
||||
Test returned properties.
|
||||
"""
|
||||
open_interval = (None, None)
|
||||
x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
|
||||
peaks, props = find_peaks(x,
|
||||
height=open_interval, threshold=open_interval,
|
||||
prominence=open_interval, width=open_interval)
|
||||
assert_(len(props) == len(self.property_keys))
|
||||
for key in self.property_keys:
|
||||
assert_(peaks.size == props[key].size)
|
||||
|
||||
def test_raises(self):
|
||||
"""
|
||||
Test exceptions raised by function.
|
||||
"""
|
||||
with raises(ValueError, match="1-D array"):
|
||||
find_peaks(np.array(1))
|
||||
with raises(ValueError, match="1-D array"):
|
||||
find_peaks(np.ones((2, 2)))
|
||||
with raises(ValueError, match="distance"):
|
||||
find_peaks(np.arange(10), distance=-1)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0",
|
||||
"ignore:some peaks have a width of 0")
|
||||
def test_wlen_smaller_plateau(self):
|
||||
"""
|
||||
Test behavior of prominence and width calculation if the given window
|
||||
length is smaller than a peak's plateau size.
|
||||
|
||||
Regression test for gh-9110.
|
||||
"""
|
||||
peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
|
||||
width=(None, None), wlen=2)
|
||||
assert_equal(peaks, 2)
|
||||
assert_equal(props["prominences"], 0)
|
||||
assert_equal(props["widths"], 0)
|
||||
assert_equal(props["width_heights"], 1)
|
||||
for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
|
||||
assert_equal(props[key], peaks)
|
||||
|
||||
@pytest.mark.parametrize("kwargs", [
|
||||
{},
|
||||
{"distance": 3.0},
|
||||
{"prominence": (None, None)},
|
||||
{"width": (None, 2)},
|
||||
|
||||
])
|
||||
def test_readonly_array(self, kwargs):
|
||||
"""
|
||||
Test readonly arrays are accepted.
|
||||
"""
|
||||
x = np.linspace(0, 10, 15)
|
||||
x_readonly = x.copy()
|
||||
x_readonly.flags.writeable = False
|
||||
|
||||
peaks, _ = find_peaks(x)
|
||||
peaks_readonly, _ = find_peaks(x_readonly, **kwargs)
|
||||
|
||||
assert_allclose(peaks, peaks_readonly)
|
||||
|
||||
|
||||
class TestFindPeaksCwt:
|
||||
|
||||
def test_find_peaks_exact(self):
|
||||
"""
|
||||
Generate a series of gaussians and attempt to find the peak locations.
|
||||
"""
|
||||
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
|
||||
num_points = 500
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas))
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
|
||||
min_length=None)
|
||||
np.testing.assert_array_equal(found_locs, act_locs,
|
||||
"Found maximum locations did not equal those expected")
|
||||
|
||||
def test_find_peaks_withnoise(self):
|
||||
"""
|
||||
Verify that peak locations are (approximately) found
|
||||
for a series of gaussians with added noise.
|
||||
"""
|
||||
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
|
||||
num_points = 500
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas))
|
||||
noise_amp = 0.07
|
||||
np.random.seed(18181911)
|
||||
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
found_locs = find_peaks_cwt(test_data, widths, min_length=15,
|
||||
gap_thresh=1, min_snr=noise_amp / 5)
|
||||
|
||||
np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number ' +
'of peaks found than expected')
|
||||
diffs = np.abs(found_locs - act_locs)
|
||||
max_diffs = np.array(sigmas) / 5
|
||||
np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed ' +
'by more than %s' % (max_diffs))
|
||||
|
||||
def test_find_peaks_nopeak(self):
|
||||
"""
|
||||
Verify that no peak is found in
|
||||
data that's just noise.
|
||||
"""
|
||||
noise_amp = 1.0
|
||||
num_points = 100
|
||||
np.random.seed(181819141)
|
||||
test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
widths = np.arange(10, 50)
|
||||
found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
|
||||
np.testing.assert_equal(len(found_locs), 0)
|
||||
|
||||
def test_find_peaks_with_non_default_wavelets(self):
|
||||
x = gaussian(200, 2)
|
||||
widths = np.array([1, 2, 3, 4])
|
||||
a = find_peaks_cwt(x, widths, wavelet=gaussian)
|
||||
|
||||
np.testing.assert_equal(np.array([100]), a)
|
||||
|
||||
def test_find_peaks_window_size(self):
|
||||
"""
|
||||
Verify that window_size is passed correctly to the private function and
affects the result.
|
||||
"""
|
||||
sigmas = [2.0, 2.0]
|
||||
num_points = 1000
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas), 0.2)
|
||||
noise_amp = 0.05
|
||||
np.random.seed(18181911)
|
||||
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
|
||||
# Possibly contrived negative region to throw off peak finding
|
||||
# when window_size is too large
|
||||
test_data[250:320] -= 1
|
||||
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
|
||||
min_length=None, window_size=None)
|
||||
with pytest.raises(AssertionError):
|
||||
assert found_locs.size == act_locs.size
|
||||
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
|
||||
min_length=None, window_size=20)
|
||||
assert found_locs.size == act_locs.size
|
||||
|
||||
def test_find_peaks_with_one_width(self):
|
||||
"""
|
||||
Verify that the `widths` argument in `find_peaks_cwt` can be a float.
"""
|
||||
xs = np.arange(0, np.pi, 0.05)
|
||||
test_data = np.sin(xs)
|
||||
widths = 1
|
||||
found_locs = find_peaks_cwt(test_data, widths)
|
||||
|
||||
np.testing.assert_equal(found_locs, 32)
|
||||
@@ -0,0 +1,52 @@
# Regression tests on result types of some signal functions
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_
|
||||
|
||||
from scipy.signal import (decimate,
|
||||
lfilter_zi,
|
||||
lfiltic,
|
||||
sos2tf,
|
||||
sosfilt_zi)
|
||||
|
||||
|
||||
def test_decimate():
|
||||
ones_f32 = np.ones(32, dtype=np.float32)
|
||||
assert_(decimate(ones_f32, 2).dtype == np.float32)
|
||||
|
||||
ones_i64 = np.ones(32, dtype=np.int64)
|
||||
assert_(decimate(ones_i64, 2).dtype == np.float64)
|
||||
|
||||
|
||||
def test_lfilter_zi():
|
||||
b_f32 = np.array([1, 2, 3], dtype=np.float32)
|
||||
a_f32 = np.array([4, 5, 6], dtype=np.float32)
|
||||
assert_(lfilter_zi(b_f32, a_f32).dtype == np.float32)
|
||||
|
||||
|
||||
def test_lfiltic():
|
||||
# this would return f32 when given a mix of f32 / f64 args
|
||||
b_f32 = np.array([1, 2, 3], dtype=np.float32)
|
||||
a_f32 = np.array([4, 5, 6], dtype=np.float32)
|
||||
x_f32 = np.ones(32, dtype=np.float32)
|
||||
|
||||
b_f64 = b_f32.astype(np.float64)
|
||||
a_f64 = a_f32.astype(np.float64)
|
||||
x_f64 = x_f32.astype(np.float64)
|
||||
|
||||
assert_(lfiltic(b_f64, a_f32, x_f32).dtype == np.float64)
|
||||
assert_(lfiltic(b_f32, a_f64, x_f32).dtype == np.float64)
|
||||
assert_(lfiltic(b_f32, a_f32, x_f64).dtype == np.float64)
|
||||
assert_(lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64)
|
||||
|
||||
|
||||
def test_sos2tf():
|
||||
sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32)
|
||||
b, a = sos2tf(sos_f32)
|
||||
assert_(b.dtype == np.float32)
|
||||
assert_(a.dtype == np.float32)
|
||||
|
||||
|
||||
def test_sosfilt_zi():
|
||||
sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32)
|
||||
assert_(sosfilt_zi(sos_f32).dtype == np.float32)
|
||||
@@ -0,0 +1,358 @@
import pytest
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_allclose, assert_equal,
|
||||
assert_almost_equal, assert_array_equal,
|
||||
assert_array_almost_equal)
|
||||
|
||||
from scipy.ndimage import convolve1d
|
||||
|
||||
from scipy.signal import savgol_coeffs, savgol_filter
|
||||
from scipy.signal._savitzky_golay import _polyder
|
||||
|
||||
|
||||
def check_polyder(p, m, expected):
|
||||
dp = _polyder(p, m)
|
||||
assert_array_equal(dp, expected)
|
||||
|
||||
|
||||
def test_polyder():
|
||||
cases = [
|
||||
([5], 0, [5]),
|
||||
([5], 1, [0]),
|
||||
([3, 2, 1], 0, [3, 2, 1]),
|
||||
([3, 2, 1], 1, [6, 2]),
|
||||
([3, 2, 1], 2, [6]),
|
||||
([3, 2, 1], 3, [0]),
|
||||
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
|
||||
]
|
||||
for p, m, expected in cases:
|
||||
check_polyder(np.array(p).T, m, np.array(expected).T)
|
||||
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# savgol_coeffs tests
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
def alt_sg_coeffs(window_length, polyorder, pos):
|
||||
"""This is an alternative implementation of the SG coefficients.
|
||||
|
||||
It uses numpy.polyfit and numpy.polyval. The results should be
|
||||
equivalent to those of savgol_coeffs(), but this implementation
|
||||
is slower.
|
||||
|
||||
window_length should be odd.
|
||||
|
||||
"""
|
||||
if pos is None:
|
||||
pos = window_length // 2
|
||||
t = np.arange(window_length)
|
||||
unit = (t == pos).astype(int)
|
||||
h = np.polyval(np.polyfit(t, unit, polyorder), t)
|
||||
return h
|
||||
|
||||
|
||||
def test_sg_coeffs_trivial():
|
||||
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
|
||||
h = savgol_coeffs(1, 0)
|
||||
assert_allclose(h, [1])
|
||||
|
||||
h = savgol_coeffs(3, 2)
|
||||
assert_allclose(h, [0, 1, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4)
|
||||
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4, pos=1)
|
||||
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4, pos=1, use='dot')
|
||||
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
|
||||
|
||||
|
||||
def compare_coeffs_to_alt(window_length, order):
|
||||
# For the given window_length and order, compare the results
|
||||
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
|
||||
# Also include pos=None.
|
||||
for pos in [None] + list(range(window_length)):
|
||||
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
|
||||
h2 = alt_sg_coeffs(window_length, order, pos=pos)
|
||||
assert_allclose(h1, h2, atol=1e-10,
|
||||
err_msg=("window_length = %d, order = %d, pos = %s" %
|
||||
(window_length, order, pos)))
|
||||
|
||||
|
||||
def test_sg_coeffs_compare():
|
||||
# Compare savgol_coeffs() to alt_sg_coeffs().
|
||||
for window_length in range(1, 8, 2):
|
||||
for order in range(window_length):
|
||||
compare_coeffs_to_alt(window_length, order)
|
||||
|
||||
|
||||
def test_sg_coeffs_exact():
|
||||
polyorder = 4
|
||||
window_length = 9
|
||||
halflen = window_length // 2
|
||||
|
||||
x = np.linspace(0, 21, 43)
|
||||
delta = x[1] - x[0]
|
||||
|
||||
# The data is a cubic polynomial. We'll use an order 4
|
||||
# SG filter, so the filtered values should equal the input data
|
||||
# (except within half window_length of the edges).
|
||||
y = 0.5 * x ** 3 - x
|
||||
h = savgol_coeffs(window_length, polyorder)
|
||||
y0 = convolve1d(y, h)
|
||||
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
|
||||
|
||||
# Check the same input, but use deriv=1. dy is the exact result.
|
||||
dy = 1.5 * x ** 2 - 1
|
||||
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
|
||||
y1 = convolve1d(y, h)
|
||||
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
|
||||
|
||||
# Check the same input, but use deriv=2. d2y is the exact result.
|
||||
d2y = 3.0 * x
|
||||
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
|
||||
y2 = convolve1d(y, h)
|
||||
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
|
||||
|
||||
|
||||
def test_sg_coeffs_deriv():
|
||||
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
|
||||
# order 2 or higher polynomial should give exact results.
|
||||
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
|
||||
x = i ** 2 / 4
|
||||
dx = i / 2
|
||||
d2x = np.full_like(i, 0.5)
|
||||
for pos in range(x.size):
|
||||
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
|
||||
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
|
||||
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
|
||||
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
|
||||
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
|
||||
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
|
||||
|
||||
|
||||
def test_sg_coeffs_deriv_gt_polyorder():
|
||||
"""
|
||||
If deriv > polyorder, the coefficients should be all 0.
|
||||
This is a regression test for a bug where, e.g.,
|
||||
savgol_coeffs(5, polyorder=1, deriv=2)
|
||||
raised an error.
|
||||
"""
|
||||
coeffs = savgol_coeffs(5, polyorder=1, deriv=2)
|
||||
assert_array_equal(coeffs, np.zeros(5))
|
||||
coeffs = savgol_coeffs(7, polyorder=4, deriv=6)
|
||||
assert_array_equal(coeffs, np.zeros(7))
|
||||
|
||||
|
||||
def test_sg_coeffs_large():
|
||||
# Test that for large values of window_length and polyorder the array of
|
||||
# coefficients returned is symmetric. The aim is to ensure that
|
||||
# no potential numeric overflow occurs.
|
||||
coeffs0 = savgol_coeffs(31, 9)
|
||||
assert_array_almost_equal(coeffs0, coeffs0[::-1])
|
||||
coeffs1 = savgol_coeffs(31, 9, deriv=1)
|
||||
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# savgol_coeffs tests for even window length
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_sg_coeffs_even_window_length():
|
||||
# Simple case - deriv=0, polyorder=0, 1
|
||||
window_lengths = [4, 6, 8, 10, 12, 14, 16]
|
||||
for length in window_lengths:
|
||||
h_p_d = savgol_coeffs(length, 0, 0)
|
||||
assert_allclose(h_p_d, 1/length)
|
||||
|
||||
# Verify with closed forms
|
||||
# deriv=1, polyorder=1, 2
|
||||
def h_p_d_closed_form_1(k, m):
|
||||
return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))
|
||||
|
||||
# deriv=2, polyorder=2
|
||||
def h_p_d_closed_form_2(k, m):
|
||||
numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)
|
||||
denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)
|
||||
return numer/denom
|
||||
|
||||
for length in window_lengths:
|
||||
m = length//2
|
||||
expected_output = [h_p_d_closed_form_1(k, m)
|
||||
for k in range(-m + 1, m + 1)][::-1]
|
||||
actual_output = savgol_coeffs(length, 1, 1)
|
||||
assert_allclose(expected_output, actual_output)
|
||||
actual_output = savgol_coeffs(length, 2, 1)
|
||||
assert_allclose(expected_output, actual_output)
|
||||
|
||||
expected_output = [h_p_d_closed_form_2(k, m)
|
||||
for k in range(-m + 1, m + 1)][::-1]
|
||||
actual_output = savgol_coeffs(length, 2, 2)
|
||||
assert_allclose(expected_output, actual_output)
|
||||
actual_output = savgol_coeffs(length, 3, 2)
|
||||
assert_allclose(expected_output, actual_output)
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# savgol_filter tests
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_sg_filter_trivial():
|
||||
""" Test some trivial edge cases for savgol_filter()."""
|
||||
x = np.array([1.0])
|
||||
y = savgol_filter(x, 1, 0)
|
||||
assert_equal(y, [1.0])
|
||||
|
||||
# Input is a single value. With a window length of 3 and polyorder 1,
|
||||
# the value in y is from the straight-line fit of (-1,0), (0,3) and
|
||||
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
|
||||
x = np.array([3.0])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_almost_equal(y, [1.0], decimal=15)
|
||||
|
||||
x = np.array([3.0])
|
||||
y = savgol_filter(x, 3, 1, mode='nearest')
|
||||
assert_almost_equal(y, [3.0], decimal=15)
|
||||
|
||||
x = np.array([1.0] * 3)
|
||||
y = savgol_filter(x, 3, 1, mode='wrap')
|
||||
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
|
||||
|
||||
|
||||
def test_sg_filter_basic():
|
||||
# Some basic test cases for savgol_filter().
|
||||
x = np.array([1.0, 2.0, 1.0])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
|
||||
|
||||
y = savgol_filter(x, 3, 1, mode='mirror')
|
||||
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
|
||||
|
||||
y = savgol_filter(x, 3, 1, mode='wrap')
|
||||
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
|
||||
|
||||
|
||||
def test_sg_filter_2d():
|
||||
x = np.array([[1.0, 2.0, 1.0],
|
||||
[2.0, 4.0, 2.0]])
|
||||
expected = np.array([[1.0, 4.0 / 3, 1.0],
|
||||
[2.0, 8.0 / 3, 2.0]])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_allclose(y, expected)
|
||||
|
||||
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
|
||||
assert_allclose(y, expected.T)
|
||||
|
||||
|
||||
def test_sg_filter_interp_edges():
|
||||
# Another test with low degree polynomial data, for which we can easily
|
||||
# give the exact results. In this test, we use mode='interp', so
|
||||
# savgol_filter should match the exact solution for the entire data set,
|
||||
# including the edges.
|
||||
t = np.linspace(-5, 5, 21)
|
||||
delta = t[1] - t[0]
|
||||
# Polynomial test data.
|
||||
x = np.array([t,
|
||||
3 * t ** 2,
|
||||
t ** 3 - t])
|
||||
dx = np.array([np.ones_like(t),
|
||||
6 * t,
|
||||
3 * t ** 2 - 1.0])
|
||||
d2x = np.array([np.zeros_like(t),
|
||||
np.full_like(t, 6),
|
||||
6 * t])
|
||||
|
||||
window_length = 7
|
||||
|
||||
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
|
||||
assert_allclose(y, x, atol=1e-12)
|
||||
|
||||
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
|
||||
deriv=1, delta=delta)
|
||||
assert_allclose(y1, dx, atol=1e-12)
|
||||
|
||||
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
|
||||
deriv=2, delta=delta)
|
||||
assert_allclose(y2, d2x, atol=1e-12)
|
||||
|
||||
# Transpose everything, and test again with axis=0.
|
||||
|
||||
x = x.T
|
||||
dx = dx.T
|
||||
d2x = d2x.T
|
||||
|
||||
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
|
||||
assert_allclose(y, x, atol=1e-12)
|
||||
|
||||
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
|
||||
deriv=1, delta=delta)
|
||||
assert_allclose(y1, dx, atol=1e-12)
|
||||
|
||||
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
|
||||
deriv=2, delta=delta)
|
||||
assert_allclose(y2, d2x, atol=1e-12)
|
||||
|
||||
|
||||
def test_sg_filter_interp_edges_3d():
|
||||
# Test mode='interp' with a 3-D array.
|
||||
t = np.linspace(-5, 5, 21)
|
||||
delta = t[1] - t[0]
|
||||
x1 = np.array([t, -t])
|
||||
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
|
||||
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
|
||||
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
|
||||
dx2 = np.array([2 * t, 6 * t])
|
||||
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
|
||||
|
||||
# z has shape (3, 2, 21)
|
||||
z = np.array([x1, x2, x3])
|
||||
dz = np.array([dx1, dx2, dx3])
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
|
||||
assert_allclose(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
|
||||
assert_allclose(dy, dz, atol=1e-10)
|
||||
|
||||
# z has shape (3, 21, 2)
|
||||
z = np.array([x1.T, x2.T, x3.T])
|
||||
dz = np.array([dx1.T, dx2.T, dx3.T])
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
|
||||
assert_allclose(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
|
||||
assert_allclose(dy, dz, atol=1e-10)
|
||||
|
||||
# z has shape (21, 3, 2)
|
||||
z = z.swapaxes(0, 1).copy()
|
||||
dz = dz.swapaxes(0, 1).copy()
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
|
||||
assert_allclose(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
|
||||
assert_allclose(dy, dz, atol=1e-10)
|
||||
|
||||
|
||||
def test_sg_filter_valid_window_length_3d():
|
||||
"""Tests that the window_length check is using the correct axis."""
|
||||
|
||||
x = np.ones((10, 20, 30))
|
||||
|
||||
savgol_filter(x, window_length=29, polyorder=3, mode='interp')
|
||||
|
||||
with pytest.raises(ValueError, match='window_length must be less than'):
|
||||
# window_length is more than x.shape[-1].
|
||||
savgol_filter(x, window_length=31, polyorder=3, mode='interp')
|
||||
|
||||
savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp')
|
||||
|
||||
with pytest.raises(ValueError, match='window_length must be less than'):
|
||||
# window_length is more than x.shape[0].
|
||||
savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp')
|
||||
@@ -0,0 +1,840 @@
"""Unit tests for module `_short_time_fft`.
|
||||
|
||||
This file's structure loosely groups the tests into the following sequential
|
||||
categories:
|
||||
|
||||
1. Test function `_calc_dual_canonical_window`.
|
||||
2. Test for invalid parameters and exceptions in `ShortTimeFFT` (until the
|
||||
`test_from_window` function).
|
||||
3. Test algorithmic properties of STFT/ISTFT. Some tests were ported from
|
||||
``test_spectral.py``.
|
||||
|
||||
Notes
|
||||
-----
|
||||
* Mypy 0.990 does interpret the line::
|
||||
|
||||
from scipy.stats import norm as normal_distribution
|
||||
|
||||
incorrectly (but the code works), hence a ``type: ignore`` was appended.
|
||||
"""
|
||||
import math
|
||||
from itertools import product
|
||||
from typing import cast, get_args, Literal
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from numpy.testing import assert_allclose, assert_equal
|
||||
from scipy.fft import fftshift
|
||||
from scipy.stats import norm as normal_distribution # type: ignore
|
||||
from scipy.signal import get_window, welch, stft, istft, spectrogram
|
||||
|
||||
from scipy.signal._short_time_fft import FFT_MODE_TYPE, \
|
||||
_calc_dual_canonical_window, ShortTimeFFT, PAD_TYPE
|
||||
from scipy.signal.windows import gaussian
|
||||
|
||||
|
||||
def test__calc_dual_canonical_window_roundtrip():
|
||||
"""Test dual window calculation with a round trip to verify duality.
|
||||
|
||||
Note that this works only for canonical window pairs (having minimal
|
||||
energy) like a Gaussian.
|
||||
|
||||
The window is the same as in the example of `ShortTimeFFT.from_dual`.
|
||||
"""
|
||||
win = gaussian(51, std=10, sym=True)
|
||||
d_win = _calc_dual_canonical_window(win, 10)
|
||||
win2 = _calc_dual_canonical_window(d_win, 10)
|
||||
assert_allclose(win2, win)
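# Illustrative sketch (assumption: the canonical dual is the window divided by
# its hop-periodized squared magnitude): the duality can also be checked
# directly, since the products win * dual_win must sum to one over every
# hop-residue class (the perfect-reconstruction condition).
def _sketch_dual_window_condition():
    win = gaussian(51, std=10, sym=True)
    hop = 10
    d_win = _calc_dual_canonical_window(win, hop)
    for r in range(hop):
        assert_allclose(np.sum(win[r::hop] * d_win[r::hop]), 1.0)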
|
||||
|
||||
|
||||
def test__calc_dual_canonical_window_exceptions():
|
||||
"""Raise all exceptions in `_calc_dual_canonical_window`."""
|
||||
# Verify that calculation can fail:
|
||||
with pytest.raises(ValueError, match="hop=5 is larger than window len.*"):
|
||||
_calc_dual_canonical_window(np.ones(4), 5)
|
||||
with pytest.raises(ValueError, match=".* Transform not invertible!"):
|
||||
_calc_dual_canonical_window(np.array([.1, .2, .3, 0]), 4)
|
||||
|
||||
# Verify that parameter `win` may not be integers:
|
||||
with pytest.raises(ValueError, match="Parameter 'win' cannot be of int.*"):
|
||||
_calc_dual_canonical_window(np.ones(4, dtype=int), 1)
|
||||
|
||||
|
||||
def test_invalid_initializer_parameters():
|
||||
"""Verify that exceptions get raised on invalid parameters when
|
||||
instantiating ShortTimeFFT. """
|
||||
with pytest.raises(ValueError, match=r"Parameter win must be 1d, " +
|
||||
r"but win.shape=\(2, 2\)!"):
|
||||
ShortTimeFFT(np.ones((2, 2)), hop=4, fs=1)
|
||||
with pytest.raises(ValueError, match="Parameter win must have " +
|
||||
"finite entries"):
|
||||
ShortTimeFFT(np.array([1, np.inf, 2, 3]), hop=4, fs=1)
|
||||
with pytest.raises(ValueError, match="Parameter hop=0 is not " +
|
||||
"an integer >= 1!"):
|
||||
ShortTimeFFT(np.ones(4), hop=0, fs=1)
|
||||
with pytest.raises(ValueError, match="Parameter hop=2.0 is not " +
|
||||
"an integer >= 1!"):
|
||||
# noinspection PyTypeChecker
|
||||
ShortTimeFFT(np.ones(4), hop=2.0, fs=1)
|
||||
with pytest.raises(ValueError, match=r"dual_win.shape=\(5,\) must equal " +
|
||||
r"win.shape=\(4,\)!"):
|
||||
ShortTimeFFT(np.ones(4), hop=2, fs=1, dual_win=np.ones(5))
|
||||
with pytest.raises(ValueError, match="Parameter dual_win must be " +
|
||||
"a finite array!"):
|
||||
ShortTimeFFT(np.ones(3), hop=2, fs=1,
|
||||
dual_win=np.array([np.nan, 2, 3]))
|
||||
|
||||
|
||||
def test_exceptions_properties_methods():
|
||||
Verify that exceptions get raised when setting properties of or calling
methods of ShortTimeFFT with invalid values.
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1)
|
||||
with pytest.raises(ValueError, match="Sampling interval T=-1 must be " +
|
||||
"positive!"):
|
||||
SFT.T = -1
|
||||
with pytest.raises(ValueError, match="Sampling frequency fs=-1 must be " +
|
||||
"positive!"):
|
||||
SFT.fs = -1
|
||||
with pytest.raises(ValueError, match="fft_mode='invalid_typ' not in " +
|
||||
r"\('twosided', 'centered', " +
|
||||
r"'onesided', 'onesided2X'\)!"):
|
||||
SFT.fft_mode = 'invalid_typ'
|
||||
with pytest.raises(ValueError, match="For scaling is None, " +
|
||||
"fft_mode='onesided2X' is invalid.*"):
|
||||
SFT.fft_mode = 'onesided2X'
|
||||
with pytest.raises(ValueError, match="Attribute mfft=7 needs to be " +
|
||||
"at least the window length.*"):
|
||||
SFT.mfft = 7
|
||||
with pytest.raises(ValueError, match="scaling='invalid' not in.*"):
|
||||
# noinspection PyTypeChecker
|
||||
SFT.scale_to('invalid')
|
||||
with pytest.raises(ValueError, match="phase_shift=3.0 has the unit .*"):
|
||||
SFT.phase_shift = 3.0
|
||||
with pytest.raises(ValueError, match="-mfft < phase_shift < mfft " +
|
||||
"does not hold.*"):
|
||||
SFT.phase_shift = 2*SFT.mfft
|
||||
with pytest.raises(ValueError, match="Parameter padding='invalid' not.*"):
|
||||
# noinspection PyTypeChecker
|
||||
g = SFT._x_slices(np.zeros(16), k_off=0, p0=0, p1=1, padding='invalid')
|
||||
next(g) # execute generator
|
||||
with pytest.raises(ValueError, match="Trend type must be 'linear' " +
|
||||
"or 'constant'"):
|
||||
# noinspection PyTypeChecker
|
||||
SFT.stft_detrend(np.zeros(16), detr='invalid')
|
||||
with pytest.raises(ValueError, match="Parameter detr=nan is not a str, " +
|
||||
"function or None!"):
|
||||
# noinspection PyTypeChecker
|
||||
SFT.stft_detrend(np.zeros(16), detr=np.nan)
|
||||
with pytest.raises(ValueError, match="Invalid Parameter p0=0, p1=200.*"):
|
||||
SFT.p_range(100, 0, 200)
|
||||
|
||||
with pytest.raises(ValueError, match="f_axis=0 may not be equal to " +
|
||||
"t_axis=0!"):
|
||||
SFT.istft(np.zeros((SFT.f_pts, 2)), t_axis=0, f_axis=0)
|
||||
with pytest.raises(ValueError, match=r"S.shape\[f_axis\]=2 must be equal" +
|
||||
" to self.f_pts=5.*"):
|
||||
SFT.istft(np.zeros((2, 2)))
|
||||
with pytest.raises(ValueError, match=r"S.shape\[t_axis\]=1 needs to have" +
|
||||
" at least 2 slices.*"):
|
||||
SFT.istft(np.zeros((SFT.f_pts, 1)))
|
||||
with pytest.raises(ValueError, match=r".*\(k1=100\) <= \(k_max=12\) " +
|
||||
"is false!$"):
|
||||
SFT.istft(np.zeros((SFT.f_pts, 3)), k1=100)
|
||||
with pytest.raises(ValueError, match=r"\(k1=1\) - \(k0=0\) = 1 has to " +
|
||||
"be at least.* length 4!"):
|
||||
SFT.istft(np.zeros((SFT.f_pts, 3)), k0=0, k1=1)
|
||||
|
||||
with pytest.raises(ValueError, match=r"Parameter axes_seq='invalid' " +
|
||||
r"not in \['tf', 'ft'\]!"):
|
||||
# noinspection PyTypeChecker
|
||||
SFT.extent(n=100, axes_seq='invalid')
|
||||
with pytest.raises(ValueError, match="Attribute fft_mode=twosided must.*"):
|
||||
SFT.fft_mode = 'twosided'
|
||||
SFT.extent(n=100)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('m', ('onesided', 'onesided2X'))
|
||||
def test_exceptions_fft_mode_complex_win(m: FFT_MODE_TYPE):
|
||||
"""Verify that one-sided spectra are not allowed with complex-valued
|
||||
windows or with complex-valued signals.
|
||||
|
||||
The reason is that the `rfft` function only accepts real-valued input.
|
||||
"""
|
||||
with pytest.raises(ValueError,
|
||||
match=f"One-sided spectra, i.e., fft_mode='{m}'.*"):
|
||||
ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode=m)
|
||||
|
||||
SFT = ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode='twosided')
|
||||
with pytest.raises(ValueError,
|
||||
match=f"One-sided spectra, i.e., fft_mode='{m}'.*"):
|
||||
SFT.fft_mode = m
|
||||
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1, scale_to='psd', fft_mode='onesided')
|
||||
with pytest.raises(ValueError, match="Complex-valued `x` not allowed for self.*"):
|
||||
SFT.stft(np.ones(8)*1j)
|
||||
SFT.fft_mode = 'onesided2X'
|
||||
with pytest.raises(ValueError, match="Complex-valued `x` not allowed for self.*"):
|
||||
SFT.stft(np.ones(8)*1j)
|
||||
|
||||
|
||||
def test_invalid_fft_mode_RuntimeError():
|
||||
"""Ensure exception gets raised when property `fft_mode` is invalid. """
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1)
|
||||
SFT._fft_mode = 'invalid_typ'
|
||||
|
||||
with pytest.raises(RuntimeError):
|
||||
_ = SFT.f
|
||||
with pytest.raises(RuntimeError):
|
||||
SFT._fft_func(np.ones(8))
|
||||
with pytest.raises(RuntimeError):
|
||||
SFT._ifft_func(np.ones(8))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('win_params, Nx', [(('gaussian', 2.), 9), # in docstr
|
||||
('triang', 7),
|
||||
(('kaiser', 4.0), 9),
|
||||
(('exponential', None, 1.), 9),
|
||||
(4.0, 9)])
|
||||
def test_from_window(win_params, Nx: int):
|
||||
"""Verify that `from_window()` handles parameters correctly.
|
||||
|
||||
The window parameterizations are documented in the `get_window` docstring.
|
||||
"""
|
||||
w_sym, fs = get_window(win_params, Nx, fftbins=False), 16.
|
||||
w_per = get_window(win_params, Nx, fftbins=True)
|
||||
SFT0 = ShortTimeFFT(w_sym, hop=3, fs=fs, fft_mode='twosided',
|
||||
scale_to='psd', phase_shift=1)
|
||||
nperseg = len(w_sym)
|
||||
noverlap = nperseg - SFT0.hop
|
||||
SFT1 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap,
|
||||
symmetric_win=True, fft_mode='twosided',
|
||||
scale_to='psd', phase_shift=1)
|
||||
# periodic window:
|
||||
SFT2 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap,
|
||||
symmetric_win=False, fft_mode='twosided',
|
||||
scale_to='psd', phase_shift=1)
|
||||
# Be informative when comparing instances:
|
||||
assert_equal(SFT1.win, SFT0.win)
|
||||
assert_allclose(SFT2.win, w_per / np.sqrt(sum(w_per**2) * fs))
|
||||
for n_ in ('hop', 'T', 'fft_mode', 'mfft', 'scaling', 'phase_shift'):
|
||||
v0, v1, v2 = (getattr(SFT_, n_) for SFT_ in (SFT0, SFT1, SFT2))
|
||||
assert v1 == v0, f"SFT1.{n_}={v1} does not equal SFT0.{n_}={v0}"
|
||||
assert v2 == v0, f"SFT2.{n_}={v2} does not equal SFT0.{n_}={v0}"
|
||||
|
||||
|
||||
def test_dual_win_roundtrip():
|
||||
"""Verify the duality of `win` and `dual_win`.
|
||||
|
||||
Note that this test does not work for arbitrary windows, since dual windows
|
||||
are not unique. It always works for invertible STFTs if the windows do not
|
||||
overlap.
|
||||
"""
|
||||
# Non-standard values for keyword arguments (except for `scale_to`):
|
||||
kw = dict(hop=4, fs=1, fft_mode='twosided', mfft=8, scale_to=None,
|
||||
phase_shift=2)
|
||||
SFT0 = ShortTimeFFT(np.ones(4), **kw)
|
||||
SFT1 = ShortTimeFFT.from_dual(SFT0.dual_win, **kw)
|
||||
assert_allclose(SFT1.dual_win, SFT0.win)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('scale_to, fac_psd, fac_mag',
|
||||
[(None, 0.25, 0.125),
|
||||
('magnitude', 2.0, 1),
|
||||
('psd', 1, 0.5)])
|
||||
def test_scaling(scale_to: Literal['magnitude', 'psd'], fac_psd, fac_mag):
|
||||
"""Verify scaling calculations.
|
||||
|
||||
* Verify passing the `scale_to` parameter to ``__init__()``.
|
||||
* Roundtrip while changing scaling factor.
|
||||
"""
|
||||
SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=scale_to)
|
||||
assert SFT.fac_psd == fac_psd
|
||||
assert SFT.fac_magnitude == fac_mag
|
||||
# increase coverage by accessing properties twice:
|
||||
assert SFT.fac_psd == fac_psd
|
||||
assert SFT.fac_magnitude == fac_mag
|
||||
|
||||
x = np.fft.irfft([0, 0, 7, 0, 0, 0, 0]) # periodic signal
|
||||
Sx = SFT.stft(x)
|
||||
Sx_mag, Sx_psd = Sx * SFT.fac_magnitude, Sx * SFT.fac_psd
|
||||
|
||||
SFT.scale_to('magnitude')
|
||||
x_mag = SFT.istft(Sx_mag, k1=len(x))
|
||||
assert_allclose(x_mag, x)
|
||||
|
||||
SFT.scale_to('psd')
|
||||
x_psd = SFT.istft(Sx_psd, k1=len(x))
|
||||
assert_allclose(x_psd, x)
|
||||
|
||||
|
||||
def test_scale_to():
|
||||
"""Verify `scale_to()` method."""
|
||||
SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None)
|
||||
|
||||
SFT.scale_to('magnitude')
|
||||
assert SFT.scaling == 'magnitude'
|
||||
assert SFT.fac_psd == 2.0
|
||||
assert SFT.fac_magnitude == 1
|
||||
|
||||
SFT.scale_to('psd')
|
||||
assert SFT.scaling == 'psd'
|
||||
assert SFT.fac_psd == 1
|
||||
assert SFT.fac_magnitude == 0.5
|
||||
|
||||
SFT.scale_to('psd') # needed for coverage
|
||||
|
||||
for scale, s_fac in zip(('magnitude', 'psd'), (8, 4)):
|
||||
SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None)
|
||||
dual_win = SFT.dual_win.copy()
|
||||
|
||||
SFT.scale_to(cast(Literal['magnitude', 'psd'], scale))
|
||||
assert_allclose(SFT.dual_win, dual_win * s_fac)
|
||||
|
||||
|
||||
def test_x_slices_padding():
|
||||
"""Verify padding.
|
||||
|
||||
The reference arrays were taken from the docstrings of `zero_ext`,
`const_ext`, `odd_ext`, and `even_ext` in the `_arraytools` module.
|
||||
"""
|
||||
SFT = ShortTimeFFT(np.ones(5), hop=4, fs=1)
|
||||
x = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]], dtype=float)
|
||||
d = {'zeros': [[[0, 0, 1, 2, 3], [0, 0, 0, 1, 4]],
|
||||
[[3, 4, 5, 0, 0], [4, 9, 16, 0, 0]]],
|
||||
'edge': [[[1, 1, 1, 2, 3], [0, 0, 0, 1, 4]],
|
||||
[[3, 4, 5, 5, 5], [4, 9, 16, 16, 16]]],
|
||||
'even': [[[3, 2, 1, 2, 3], [4, 1, 0, 1, 4]],
|
||||
[[3, 4, 5, 4, 3], [4, 9, 16, 9, 4]]],
|
||||
'odd': [[[-1, 0, 1, 2, 3], [-4, -1, 0, 1, 4]],
|
||||
[[3, 4, 5, 6, 7], [4, 9, 16, 23, 28]]]}
|
||||
for p_, xx in d.items():
|
||||
gen = SFT._x_slices(np.array(x), 0, 0, 2, padding=cast(PAD_TYPE, p_))
|
||||
yy = np.array([y_.copy() for y_ in gen])  # copy since the generator reuses its buffer in place
|
||||
assert_equal(yy, xx, err_msg=f"Failed '{p_}' padding.")
|
||||
|
||||
|
||||
def test_invertible():
|
||||
"""Verify `invertible` property. """
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1)
|
||||
assert SFT.invertible
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=9, fs=1)
|
||||
assert not SFT.invertible
|
||||
|
||||
|
||||
def test_border_values():
|
||||
"""Ensure that minimum and maximum values of slices are correct."""
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1)
|
||||
assert SFT.p_min == 0
|
||||
assert SFT.k_min == -4
|
||||
assert SFT.lower_border_end == (4, 1)
|
||||
assert SFT.lower_border_end == (4, 1) # needed to test caching
|
||||
assert SFT.p_max(10) == 4
|
||||
assert SFT.k_max(10) == 16
|
||||
assert SFT.upper_border_begin(10) == (4, 2)
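# A sketch of the arithmetic behind these values, assuming slice p covers the
# samples p*hop - m_num//2 .. p*hop + m_num//2 - 1 (here 4*p - 4 .. 4*p + 3):
# slice 0 covers -4..3, so k_min = -4; sample 4 is the first one touched only
# by padding-free slices (p >= 1), giving lower_border_end == (4, 1); slice 2
# (samples 4..11) is the first one reaching past sample 9, giving
# upper_border_begin(10) == (4, 2); the last slice overlapping the signal is
# p = 3 (samples 8..15), so p_max(10) == 4 and k_max(10) == 16.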
|
||||
|
||||
|
||||
def test_border_values_exotic():
|
||||
"""Ensure that the border calculations are correct for windows with
|
||||
zeros. """
|
||||
w = np.array([0, 0, 0, 0, 0, 0, 0, 1.])
|
||||
SFT = ShortTimeFFT(w, hop=1, fs=1)
|
||||
assert SFT.lower_border_end == (0, 0)
|
||||
|
||||
SFT = ShortTimeFFT(np.flip(w), hop=20, fs=1)
|
||||
assert SFT.upper_border_begin(4) == (0, 0)
|
||||
|
||||
SFT._hop = -1 # provoke unreachable line
|
||||
with pytest.raises(RuntimeError):
|
||||
_ = SFT.k_max(4)
|
||||
with pytest.raises(RuntimeError):
|
||||
_ = SFT.k_min
|
||||
|
||||
|
||||
def test_t():
|
||||
"""Verify that the times of the slices are correct. """
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=2)
|
||||
assert SFT.T == 1/2
|
||||
assert SFT.fs == 2.
|
||||
assert SFT.delta_t == 4 * 1/2
|
||||
t_stft = np.arange(0, SFT.p_max(10)) * SFT.delta_t
|
||||
assert_equal(SFT.t(10), t_stft)
|
||||
assert_equal(SFT.t(10, 1, 3), t_stft[1:3])
|
||||
SFT.T = 1/4
|
||||
assert SFT.T == 1/4
|
||||
assert SFT.fs == 4
|
||||
SFT.fs = 1/8
|
||||
assert SFT.fs == 1/8
|
||||
assert SFT.T == 8
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fft_mode, f',
|
||||
[('onesided', [0., 1., 2.]),
|
||||
('onesided2X', [0., 1., 2.]),
|
||||
('twosided', [0., 1., 2., -2., -1.]),
|
||||
('centered', [-2., -1., 0., 1., 2.])])
|
||||
def test_f(fft_mode: FFT_MODE_TYPE, f):
|
||||
"""Verify the frequency values property `f`."""
|
||||
SFT = ShortTimeFFT(np.ones(5), hop=4, fs=5, fft_mode=fft_mode,
|
||||
scale_to='psd')
|
||||
assert_equal(SFT.f, f)
|
||||
|
||||
|
||||
def test_extent():
|
||||
"""Ensure that the `extent()` method is correct. """
|
||||
SFT = ShortTimeFFT(np.ones(32), hop=4, fs=32, fft_mode='onesided')
|
||||
assert SFT.extent(100, 'tf', False) == (-0.375, 3.625, 0.0, 17.0)
|
||||
assert SFT.extent(100, 'ft', False) == (0.0, 17.0, -0.375, 3.625)
|
||||
assert SFT.extent(100, 'tf', True) == (-0.4375, 3.5625, -0.5, 16.5)
|
||||
assert SFT.extent(100, 'ft', True) == (-0.5, 16.5, -0.4375, 3.5625)
|
||||
|
||||
SFT = ShortTimeFFT(np.ones(32), hop=4, fs=32, fft_mode='centered')
|
||||
assert SFT.extent(100, 'tf', False) == (-0.375, 3.625, -16.0, 15.0)
|
||||
|
||||
|
||||
def test_spectrogram():
|
||||
"""Verify spectrogram and cross-spectrogram methods. """
|
||||
SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1)
|
||||
x, y = np.ones(10), np.arange(10)
|
||||
X, Y = SFT.stft(x), SFT.stft(y)
|
||||
assert_allclose(SFT.spectrogram(x), X.real**2+X.imag**2)
|
||||
assert_allclose(SFT.spectrogram(x, y), X * Y.conj())
|
||||
|
||||
|
||||
@pytest.mark.parametrize('n', [8, 9])
|
||||
def test_fft_func_roundtrip(n: int):
|
||||
"""Test roundtrip `ifft_func(fft_func(x)) == x` for all permutations of
|
||||
relevant parameters. """
|
||||
np.random.seed(2394795)
|
||||
x0 = np.random.rand(n)
|
||||
w, h_n = np.ones(n), 4
|
||||
|
||||
pp = dict(
|
||||
fft_mode=get_args(FFT_MODE_TYPE),
|
||||
mfft=[None, n, n+1, n+2],
|
||||
scaling=[None, 'magnitude', 'psd'],
|
||||
phase_shift=[None, -n+1, 0, n // 2, n-1])
|
||||
for f_typ, mfft, scaling, phase_shift in product(*pp.values()):
|
||||
if f_typ == 'onesided2X' and scaling is None:
|
||||
continue # this combination is forbidden
|
||||
SFT = ShortTimeFFT(w, h_n, fs=n, fft_mode=f_typ, mfft=mfft,
|
||||
scale_to=scaling, phase_shift=phase_shift)
|
||||
X0 = SFT._fft_func(x0)
|
||||
x1 = SFT._ifft_func(X0)
|
||||
assert_allclose(x0, x1, err_msg="_fft_func() roundtrip failed for " +
|
||||
f"{f_typ=}, {mfft=}, {scaling=}, {phase_shift=}")
|
||||
|
||||
SFT = ShortTimeFFT(w, h_n, fs=1)
|
||||
SFT._fft_mode = 'invalid_fft' # type: ignore
|
||||
with pytest.raises(RuntimeError):
|
||||
SFT._fft_func(x0)
|
||||
with pytest.raises(RuntimeError):
|
||||
SFT._ifft_func(x0)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('i', range(19))
|
||||
def test_impulse_roundtrip(i):
|
||||
"""Roundtrip for an impulse being at different positions `i`."""
|
||||
n = 19
|
||||
w, h_n = np.ones(8), 3
|
||||
x = np.zeros(n)
|
||||
x[i] = 1
|
||||
|
||||
SFT = ShortTimeFFT(w, hop=h_n, fs=1, scale_to=None, phase_shift=None)
|
||||
Sx = SFT.stft(x)
|
||||
# test slicing the input signal into two parts:
|
||||
n_q = SFT.nearest_k_p(n // 2)
|
||||
Sx0 = SFT.stft(x[:n_q], padding='zeros')
|
||||
Sx1 = SFT.stft(x[n_q:], padding='zeros')
|
||||
q0_ub = SFT.upper_border_begin(n_q)[1] - SFT.p_min
|
||||
q1_le = SFT.lower_border_end[1] - SFT.p_min
|
||||
assert_allclose(Sx0[:, :q0_ub], Sx[:, :q0_ub], err_msg=f"{i=}")
|
||||
assert_allclose(Sx1[:, q1_le:], Sx[:, q1_le-Sx1.shape[1]:],
|
||||
err_msg=f"{i=}")
|
||||
|
||||
Sx01 = np.hstack((Sx0[:, :q0_ub],
|
||||
Sx0[:, q0_ub:] + Sx1[:, :q1_le],
|
||||
Sx1[:, q1_le:]))
|
||||
assert_allclose(Sx, Sx01, atol=1e-8, err_msg=f"{i=}")
|
||||
|
||||
y = SFT.istft(Sx, 0, n)
|
||||
assert_allclose(y, x, atol=1e-8, err_msg=f"{i=}")
|
||||
y0 = SFT.istft(Sx, 0, n//2)
|
||||
assert_allclose(x[:n//2], y0, atol=1e-8, err_msg=f"{i=}")
|
||||
y1 = SFT.istft(Sx, n // 2, n)
|
||||
assert_allclose(x[n // 2:], y1, atol=1e-8, err_msg=f"{i=}")
|
||||
|
||||
|
||||
@pytest.mark.parametrize('hop', [1, 7, 8])
|
||||
def test_asymmetric_window_roundtrip(hop: int):
|
||||
"""An asymmetric window could uncover indexing problems. """
|
||||
np.random.seed(23371)
|
||||
|
||||
w = np.arange(16) / 8 # must be of type float
|
||||
w[len(w)//2:] = 1
|
||||
SFT = ShortTimeFFT(w, hop, fs=1)
|
||||
|
||||
x = 10 * np.random.randn(64)
|
||||
Sx = SFT.stft(x)
|
||||
x1 = SFT.istft(Sx, k1=len(x))
|
||||
assert_allclose(x1, x, err_msg="Roundtrip for asymmetric window with " +
                f"{hop=} failed!")
|
||||
|
||||
|
||||
@pytest.mark.parametrize('m_num', [6, 7])
|
||||
def test_minimal_length_signal(m_num):
|
||||
"""Verify that the shortest allowed signal works. """
|
||||
SFT = ShortTimeFFT(np.ones(m_num), m_num//2, fs=1)
|
||||
n = math.ceil(m_num/2)
|
||||
x = np.ones(n)
|
||||
Sx = SFT.stft(x)
|
||||
x1 = SFT.istft(Sx, k1=n)
|
||||
assert_allclose(x1, x, err_msg=f"Roundtrip minimal length signal ({n=})" +
|
||||
f" for {m_num} sample window failed!")
|
||||
with pytest.raises(ValueError, match=rf"len\(x\)={n-1} must be >= ceil.*"):
|
||||
SFT.stft(x[:-1])
|
||||
with pytest.raises(ValueError, match=rf"S.shape\[t_axis\]={Sx.shape[1]-1}"
|
||||
f" needs to have at least {Sx.shape[1]} slices"):
|
||||
SFT.istft(Sx[:, :-1], k1=n)
|
||||
|
||||
|
||||
def test_tutorial_stft_sliding_win():
|
||||
"""Verify example in "Sliding Windows" subsection from the "User Guide".
|
||||
|
||||
In :ref:`tutorial_stft_sliding_win` (file ``signal.rst``) of the
|
||||
:ref:`user_guide` the border behavior of
|
||||
``ShortTimeFFT(np.ones(6), 2, fs=1)`` with a 50 sample signal is discussed.
|
||||
This test verifies the presented indexes.
|
||||
"""
|
||||
SFT = ShortTimeFFT(np.ones(6), 2, fs=1)
|
||||
|
||||
# Lower border:
|
||||
assert SFT.m_num_mid == 3, f"Slice middle is not 3 but {SFT.m_num_mid=}"
|
||||
assert SFT.p_min == -1, f"Lowest slice {SFT.p_min=} is not -1"
|
||||
assert SFT.k_min == -5, f"Lowest slice sample {SFT.k_min=} is not -5"
|
||||
k_lb, p_lb = SFT.lower_border_end
|
||||
assert p_lb == 2, f"First unaffected slice {p_lb=} is not 2"
|
||||
assert k_lb == 5, f"First unaffected sample {k_lb=} is not 5"
|
||||
|
||||
n = 50 # upper signal border
|
||||
assert (p_max := SFT.p_max(n)) == 27, f"Last slice {p_max=} must be 27"
|
||||
assert (k_max := SFT.k_max(n)) == 55, f"Last sample {k_max=} must be 55"
|
||||
k_ub, p_ub = SFT.upper_border_begin(n)
|
||||
assert p_ub == 24, f"First upper border slice {p_ub=} must be 24"
|
||||
assert k_ub == 45, f"First upper border sample {k_ub=} must be 45"
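# Sketch of the upper-border arithmetic, assuming slice p covers samples
# p*hop - m_num_mid .. p*hop + m_num - m_num_mid - 1, i.e. 2*p - 3 .. 2*p + 2
# here: slices are needed while 2*p - 3 < 50, so the last one is p = 26 and
# p_max = 27 (exclusive); it ends at sample 2*26 + 2 = 54, hence k_max = 55.
# A slice first reaches the padded region (samples >= 50) at 2*p + 2 >= 50,
# i.e. p = 24, which starts at sample 2*24 - 3 = 45.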
|
||||
|
||||
|
||||
def test_tutorial_stft_legacy_stft():
|
||||
"""Verify STFT example in "Comparison with Legacy Implementation" from the
|
||||
"User Guide".
|
||||
|
||||
In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the
|
||||
:ref:`user_guide` the legacy and the new implementation are compared.
|
||||
"""
|
||||
fs, N = 200, 1001  # 200 Hz sampling rate for 5 s signal
|
||||
t_z = np.arange(N) / fs # time indexes for signal
|
||||
z = np.exp(2j*np.pi * 70 * (t_z - 0.2 * t_z ** 2)) # complex-valued chirp
|
||||
|
||||
nperseg, noverlap = 50, 40
|
||||
win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard deviation
|
||||
|
||||
# Legacy STFT:
|
||||
f0_u, t0, Sz0_u = stft(z, fs, win, nperseg, noverlap,
|
||||
return_onesided=False, scaling='spectrum')
|
||||
Sz0 = fftshift(Sz0_u, axes=0)
|
||||
|
||||
# New STFT:
|
||||
SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap,
|
||||
fft_mode='centered',
|
||||
scale_to='magnitude', phase_shift=None)
|
||||
Sz1 = SFT.stft(z)
|
||||
|
||||
assert_allclose(Sz0, Sz1[:, 2:-1])
|
||||
|
||||
assert_allclose((abs(Sz1[:, 1]).min(), abs(Sz1[:, 1]).max()),
|
||||
(6.925060911593139e-07, 8.00271269218721e-07))
|
||||
|
||||
t0_r, z0_r = istft(Sz0_u, fs, win, nperseg, noverlap, input_onesided=False,
|
||||
scaling='spectrum')
|
||||
z1_r = SFT.istft(Sz1, k1=N)
|
||||
assert len(z0_r) == N + 9
|
||||
assert_allclose(z0_r[:N], z)
|
||||
assert_allclose(z1_r, z)
|
||||
|
||||
# Spectrogram is just the absolute square of the STFT:
|
||||
assert_allclose(SFT.spectrogram(z), abs(Sz1) ** 2)
|
||||
|
||||
|
||||
def test_tutorial_stft_legacy_spectrogram():
|
||||
"""Verify spectrogram example in "Comparison with Legacy Implementation"
|
||||
from the "User Guide".
|
||||
|
||||
In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the
|
||||
:ref:`user_guide` the legacy and the new implementation are compared.
|
||||
"""
|
||||
fs, N = 200, 1001 # 200 Hz sampling rate for almost 5 s signal
|
||||
t_z = np.arange(N) / fs # time indexes for signal
|
||||
z = np.exp(2j*np.pi*70 * (t_z - 0.2*t_z**2)) # complex-valued sweep
|
||||
|
||||
nperseg, noverlap = 50, 40
|
||||
win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard dev.
|
||||
|
||||
# Legacy spectrogram:
|
||||
f2_u, t2, Sz2_u = spectrogram(z, fs, win, nperseg, noverlap, detrend=None,
|
||||
return_onesided=False, scaling='spectrum',
|
||||
mode='complex')
|
||||
|
||||
f2, Sz2 = fftshift(f2_u), fftshift(Sz2_u, axes=0)
|
||||
|
||||
# New STFT:
|
||||
SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap,
|
||||
fft_mode='centered', scale_to='magnitude',
|
||||
phase_shift=None)
|
||||
Sz3 = SFT.stft(z, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2)
|
||||
t3 = SFT.t(N, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2)
|
||||
|
||||
assert_allclose(t2, t3)
|
||||
assert_allclose(f2, SFT.f)
|
||||
assert_allclose(Sz2, Sz3)
|
||||
|
||||
|
||||
def test_permute_axes():
|
||||
"""Verify correctness of four-dimensional signal by permuting its
|
||||
shape. """
|
||||
n = 25
|
||||
SFT = ShortTimeFFT(np.ones(8)/8, hop=3, fs=n)
|
||||
x0 = np.arange(n)
|
||||
Sx0 = SFT.stft(x0)
|
||||
Sx0 = Sx0.reshape((Sx0.shape[0], 1, 1, 1, Sx0.shape[-1]))
|
||||
SxT = np.moveaxis(Sx0, (0, -1), (-1, 0))
|
||||
|
||||
atol = 2 * np.finfo(SFT.win.dtype).resolution
|
||||
for i in range(4):
|
||||
y = np.reshape(x0, np.roll((n, 1, 1, 1), i))
|
||||
Sy = SFT.stft(y, axis=i)
|
||||
assert_allclose(Sy, np.moveaxis(Sx0, 0, i))
|
||||
|
||||
yb0 = SFT.istft(Sy, k1=n, f_axis=i)
|
||||
assert_allclose(yb0, y, atol=atol)
|
||||
# explicit t-axis parameter (for coverage):
|
||||
yb1 = SFT.istft(Sy, k1=n, f_axis=i, t_axis=Sy.ndim-1)
|
||||
assert_allclose(yb1, y, atol=atol)
|
||||
|
||||
SyT = np.moveaxis(Sy, (i, -1), (-1, i))
|
||||
assert_allclose(SyT, np.moveaxis(SxT, 0, i))
|
||||
|
||||
ybT = SFT.istft(SyT, k1=n, t_axis=i, f_axis=-1)
|
||||
assert_allclose(ybT, y, atol=atol)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("fft_mode",
|
||||
('twosided', 'centered', 'onesided', 'onesided2X'))
|
||||
def test_roundtrip_multidimensional(fft_mode: FFT_MODE_TYPE):
|
||||
"""Test roundtrip of a multidimensional input signal versus its components.
|
||||
|
||||
This test can uncover potential problems with `fftshift()`.
|
||||
"""
|
||||
n = 9
|
||||
x = np.arange(4*n*2).reshape(4, n, 2)
|
||||
SFT = ShortTimeFFT(get_window('hann', 4), hop=2, fs=1,
|
||||
scale_to='magnitude', fft_mode=fft_mode)
|
||||
Sx = SFT.stft(x, axis=1)
|
||||
y = SFT.istft(Sx, k1=n, f_axis=1, t_axis=-1)
|
||||
assert_allclose(y, x, err_msg='Multidim. roundtrip failed!')
|
||||
|
||||
for i, j in product(range(x.shape[0]), range(x.shape[2])):
|
||||
y_ = SFT.istft(Sx[i, :, j, :], k1=n)
|
||||
assert_allclose(y_, x[i, :, j], err_msg="Multidim. roundtrip for component " +
|
||||
f"x[{i}, :, {j}] and {fft_mode=} failed!")
|
||||
|
||||
|
||||
@pytest.mark.parametrize('window, n, nperseg, noverlap',
|
||||
[('boxcar', 100, 10, 0), # Test no overlap
|
||||
('boxcar', 100, 10, 9), # Test high overlap
|
||||
('bartlett', 101, 51, 26), # Test odd nperseg
|
||||
('hann', 1024, 256, 128), # Test defaults
|
||||
(('tukey', 0.5), 1152, 256, 64), # Test Tukey
|
||||
('hann', 1024, 256, 255), # Test overlapped hann
|
||||
('boxcar', 100, 10, 3), # NOLA True, COLA False
|
||||
('bartlett', 101, 51, 37), # NOLA True, COLA False
|
||||
('hann', 1024, 256, 127), # NOLA True, COLA False
|
||||
# NOLA True, COLA False:
|
||||
(('tukey', 0.5), 1152, 256, 14),
|
||||
('hann', 1024, 256, 5)]) # NOLA True, COLA False
|
||||
def test_roundtrip_windows(window, n: int, nperseg: int, noverlap: int):
|
||||
"""Roundtrip test adapted from `test_spectral.TestSTFT`.
|
||||
|
||||
The parameters are taken from the methods test_roundtrip_real(),
|
||||
test_roundtrip_nola_not_cola(), test_roundtrip_float32(),
|
||||
test_roundtrip_complex().
|
||||
"""
|
||||
np.random.seed(2394655)
|
||||
|
||||
w = get_window(window, nperseg)
|
||||
SFT = ShortTimeFFT(w, nperseg - noverlap, fs=1, fft_mode='twosided',
|
||||
phase_shift=None)
|
||||
|
||||
z = 10 * np.random.randn(n) + 10j * np.random.randn(n)
|
||||
Sz = SFT.stft(z)
|
||||
z1 = SFT.istft(Sz, k1=len(z))
|
||||
assert_allclose(z, z1, err_msg="Roundtrip for complex values failed")
|
||||
|
||||
x = 10 * np.random.randn(n)
|
||||
Sx = SFT.stft(x)
|
||||
x1 = SFT.istft(Sx, k1=len(x))
|
||||
assert_allclose(x, x1, err_msg="Roundtrip for float values failed")
|
||||
|
||||
x32 = x.astype(np.float32)
|
||||
Sx32 = SFT.stft(x32)
|
||||
x32_1 = SFT.istft(Sx32, k1=len(x32))
|
||||
assert_allclose(x32, x32_1,
|
||||
err_msg="Roundtrip for 32 Bit float values failed")
|
||||
|
||||
|
||||
@pytest.mark.parametrize('signal_type', ('real', 'complex'))
|
||||
def test_roundtrip_complex_window(signal_type):
|
||||
"""Test roundtrip for complex-valued window function
|
||||
|
||||
The purpose of this test is to check if the dual window is calculated
|
||||
correctly for complex-valued windows.
|
||||
"""
|
||||
np.random.seed(1354654)
|
||||
win = np.exp(2j*np.linspace(0, np.pi, 8))
|
||||
SFT = ShortTimeFFT(win, 3, fs=1, fft_mode='twosided')
|
||||
|
||||
z = 10 * np.random.randn(11)
|
||||
if signal_type == 'complex':
|
||||
z = z + 2j * z
|
||||
Sz = SFT.stft(z)
|
||||
z1 = SFT.istft(Sz, k1=len(z))
|
||||
assert_allclose(z, z1,
|
||||
err_msg="Roundtrip for complex-valued window failed")
|
||||
|
||||
|
||||
def test_average_all_segments():
|
||||
"""Compare `welch` function with stft mean.
|
||||
|
||||
Ported from `TestSpectrogram.test_average_all_segments` from file
|
||||
``test__spectral.py``.
|
||||
"""
|
||||
x = np.random.randn(1024)
|
||||
|
||||
fs = 1.0
|
||||
window = ('tukey', 0.25)
|
||||
nperseg, noverlap = 16, 2
|
||||
fw, Pw = welch(x, fs, window, nperseg, noverlap)
|
||||
SFT = ShortTimeFFT.from_window(window, fs, nperseg, noverlap,
|
||||
fft_mode='onesided2X', scale_to='psd',
|
||||
phase_shift=None)
|
||||
# `welch` positions the window differently than the STFT:
|
||||
P = SFT.spectrogram(x, detr='constant', p0=0,
|
||||
p1=(len(x)-noverlap)//SFT.hop, k_offset=nperseg//2)
|
||||
|
||||
assert_allclose(SFT.f, fw)
|
||||
assert_allclose(np.mean(P, axis=-1), Pw)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('window, N, nperseg, noverlap, mfft',
|
||||
# from test_roundtrip_padded_FFT:
|
||||
[('hann', 1024, 256, 128, 512),
|
||||
('hann', 1024, 256, 128, 501),
|
||||
('boxcar', 100, 10, 0, 33),
|
||||
(('tukey', 0.5), 1152, 256, 64, 1024),
|
||||
# from test_roundtrip_padded_signal:
|
||||
('boxcar', 101, 10, 0, None),
|
||||
('hann', 1000, 256, 128, None),
|
||||
# from test_roundtrip_boundary_extension:
|
||||
('boxcar', 100, 10, 0, None),
|
||||
('boxcar', 100, 10, 9, None)])
|
||||
@pytest.mark.parametrize('padding', get_args(PAD_TYPE))
|
||||
def test_stft_padding_roundtrip(window, N: int, nperseg: int, noverlap: int,
|
||||
mfft: int, padding):
|
||||
"""Test the parameter 'padding' of `stft` with roundtrips.
|
||||
|
||||
The STFT parametrizations were taken from the methods
|
||||
`test_roundtrip_padded_FFT`, `test_roundtrip_padded_signal` and
|
||||
`test_roundtrip_boundary_extension` from class `TestSTFT` in file
|
||||
``test_spectral.py``. Note that the ShortTimeFFT does not need the
|
||||
concept of "boundary extension".
|
||||
"""
|
||||
x = normal_distribution.rvs(size=N, random_state=2909) # real signal
|
||||
z = x * np.exp(1j * np.pi / 4) # complex signal
|
||||
|
||||
SFT = ShortTimeFFT.from_window(window, 1, nperseg, noverlap,
|
||||
fft_mode='twosided', mfft=mfft)
|
||||
Sx = SFT.stft(x, padding=padding)
|
||||
x1 = SFT.istft(Sx, k1=N)
|
||||
assert_allclose(x1, x,
|
||||
err_msg=f"Failed real roundtrip with '{padding}' padding")
|
||||
|
||||
Sz = SFT.stft(z, padding=padding)
|
||||
z1 = SFT.istft(Sz, k1=N)
|
||||
assert_allclose(z1, z, err_msg="Failed complex roundtrip with " +
                f"'{padding}' padding")
|
||||
|
||||
|
||||
@pytest.mark.parametrize('N_x', (128, 129, 255, 256, 1337)) # signal length
|
||||
@pytest.mark.parametrize('w_size', (128, 256)) # window length
|
||||
@pytest.mark.parametrize('t_step', (4, 64)) # SFT time hop
|
||||
@pytest.mark.parametrize('f_c', (7., 23.)) # frequency of input sine
|
||||
def test_energy_conservation(N_x: int, w_size: int, t_step: int, f_c: float):
|
||||
"""Test if a `psd`-scaled STFT conserves the L2 norm.
|
||||
|
||||
This test is adapted from MNE-Python [1]_. Besides being battle-tested,
|
||||
this test has the benefit of using a non-standard window including
non-positive values and a 2d input signal.
|
||||
|
||||
Since `ShortTimeFFT` requires the signal length `N_x` to be at least the
|
||||
window length `w_size`, the parameter `N_x` was changed from
|
||||
``(127, 128, 255, 256, 1337)`` to ``(128, 129, 255, 256, 1337)`` to be
|
||||
more useful.
|
||||
|
||||
.. [1] File ``test_stft.py`` of MNE-Python
|
||||
https://github.com/mne-tools/mne-python/blob/main/mne/time_frequency/tests/test_stft.py
|
||||
"""
|
||||
window = np.sin(np.arange(.5, w_size + .5) / w_size * np.pi)
|
||||
SFT = ShortTimeFFT(window, t_step, fs=1000, fft_mode='onesided2X',
|
||||
scale_to='psd')
|
||||
atol = 2*np.finfo(window.dtype).resolution
|
||||
N_x = max(N_x, w_size)  # enforce the minimal signal length
|
||||
# Test with low frequency signal
|
||||
t = np.arange(N_x).astype(np.float64)
|
||||
x = np.sin(2 * np.pi * f_c * t * SFT.T)
|
||||
x = np.array([x, x + 1.])
|
||||
X = SFT.stft(x)
|
||||
xp = SFT.istft(X, k1=N_x)
|
||||
|
||||
max_freq = SFT.f[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
|
||||
|
||||
assert X.shape[1] == SFT.f_pts
|
||||
assert np.all(SFT.f >= 0.)
|
||||
assert np.abs(max_freq - f_c) < 1.
|
||||
assert_allclose(x, xp, atol=atol)
|
||||
|
||||
# check L2-norm squared (i.e., energy) conservation:
|
||||
E_x = np.sum(x**2, axis=-1) * SFT.T # numerical integration
|
||||
aX2 = X.real**2 + X.imag**2
|
||||
E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f
|
||||
assert_allclose(E_X, E_x, atol=atol)
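# This is a discretized Parseval relation for the psd-scaled STFT: the energy
# integral over time equals the double sum of the spectrogram weighted by
# delta_t and delta_f; the 'onesided2X' mode scales the paired bins so that
# summing over non-negative frequencies alone captures the full energy.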
|
||||
|
||||
# Test with random signal
|
||||
np.random.seed(2392795)
|
||||
x = np.random.randn(2, N_x)
|
||||
X = SFT.stft(x)
|
||||
xp = SFT.istft(X, k1=N_x)
|
||||
|
||||
assert X.shape[1] == SFT.f_pts
|
||||
assert np.all(SFT.f >= 0.)
|
||||
assert np.abs(max_freq - f_c) < 1.
|
||||
assert_allclose(x, xp, atol=atol)
|
||||
|
||||
# check L2-norm squared (i.e., energy) conservation:
|
||||
E_x = np.sum(x**2, axis=-1) * SFT.T # numeric integration
|
||||
aX2 = X.real ** 2 + X.imag ** 2
|
||||
E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f
|
||||
assert_allclose(E_X, E_x, atol=atol)
|
||||
|
||||
# Try with empty array
|
||||
x = np.zeros((0, N_x))
|
||||
X = SFT.stft(x)
|
||||
xp = SFT.istft(X, k1=N_x)
|
||||
assert xp.shape == x.shape
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,287 @@
|
||||
# Code adapted from "upfirdn" python library with permission:
|
||||
#
|
||||
# Copyright (c) 2009, Motorola, Inc
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright notice,
|
||||
# this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Motorola nor the names of its contributors may be
|
||||
# used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
import numpy as np
|
||||
from itertools import product
|
||||
|
||||
from numpy.testing import assert_equal, assert_allclose
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy.signal import upfirdn, firwin
|
||||
from scipy.signal._upfirdn import _output_len, _upfirdn_modes
|
||||
from scipy.signal._upfirdn_apply import _pad_test
|
||||
|
||||
|
||||
def upfirdn_naive(x, h, up=1, down=1):
|
||||
"""Naive upfirdn processing in Python.
|
||||
|
||||
Note: arg order (x, h) differs to facilitate apply_along_axis use.
|
||||
"""
|
||||
h = np.asarray(h)
|
||||
out = np.zeros(len(x) * up, x.dtype)
|
||||
out[::up] = x
|
||||
out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)]
|
||||
return out
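# A quick illustration (not part of the test suite) of what upfirdn_naive()
# computes: zero-stuff by `up`, FIR-filter with `h`, then keep every `down`-th
# sample, cropped to the length given by _output_len():
def _upfirdn_naive_example():
    y = upfirdn_naive(np.array([1., 2., 3.]), [1.], up=2, down=1)
    assert list(y) == [1., 0., 2., 0., 3.]  # pass-through filter, upsampled by 2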
|
||||
|
||||
|
||||
class UpFIRDnCase:
|
||||
"""Test _UpFIRDn object"""
|
||||
def __init__(self, up, down, h, x_dtype):
|
||||
self.up = up
|
||||
self.down = down
|
||||
self.h = np.atleast_1d(h)
|
||||
self.x_dtype = x_dtype
|
||||
self.rng = np.random.RandomState(17)
|
||||
|
||||
def __call__(self):
|
||||
# tiny signal
|
||||
self.scrub(np.ones(1, self.x_dtype))
|
||||
# ones
|
||||
self.scrub(np.ones(10, self.x_dtype)) # ones
|
||||
# randn
|
||||
x = self.rng.randn(10).astype(self.x_dtype)
|
||||
if self.x_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * self.rng.randn(10)
|
||||
self.scrub(x)
|
||||
# ramp
|
||||
self.scrub(np.arange(10).astype(self.x_dtype))
|
||||
# 3D, random
|
||||
size = (2, 3, 5)
|
||||
x = self.rng.randn(*size).astype(self.x_dtype)
|
||||
if self.x_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * self.rng.randn(*size)
|
||||
for axis in range(len(size)):
|
||||
self.scrub(x, axis=axis)
|
||||
x = x[:, ::2, 1::3].T
|
||||
for axis in range(len(size)):
|
||||
self.scrub(x, axis=axis)
|
||||
|
||||
def scrub(self, x, axis=-1):
|
||||
yr = np.apply_along_axis(upfirdn_naive, axis, x,
|
||||
self.h, self.up, self.down)
|
||||
want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down)
|
||||
assert yr.shape[axis] == want_len
|
||||
y = upfirdn(self.h, x, self.up, self.down, axis=axis)
|
||||
assert y.shape[axis] == want_len
|
||||
assert y.shape == yr.shape
|
||||
dtypes = (self.h.dtype, x.dtype)
|
||||
if all(d == np.complex64 for d in dtypes):
|
||||
assert_equal(y.dtype, np.complex64)
|
||||
elif np.complex64 in dtypes and np.float32 in dtypes:
|
||||
assert_equal(y.dtype, np.complex64)
|
||||
elif all(d == np.float32 for d in dtypes):
|
||||
assert_equal(y.dtype, np.float32)
|
||||
elif np.complex128 in dtypes or np.complex64 in dtypes:
|
||||
assert_equal(y.dtype, np.complex128)
|
||||
else:
|
||||
assert_equal(y.dtype, np.float64)
|
||||
assert_allclose(yr, y)
|
||||
|
||||
|
||||
_UPFIRDN_TYPES = (int, np.float32, np.complex64, float, complex)
|
||||
|
||||
|
||||
class TestUpfirdn:
|
||||
|
||||
def test_valid_input(self):
|
||||
assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1
|
||||
assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1
|
||||
assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)
|
||||
|
||||
@pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5])
|
||||
@pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5])
|
||||
def test_singleton(self, len_h, len_x):
|
||||
# gh-9844: lengths producing expected outputs
|
||||
h = np.zeros(len_h)
|
||||
h[len_h // 2] = 1. # make h a delta
|
||||
x = np.ones(len_x)
|
||||
y = upfirdn(h, x, 1, 1)
|
||||
want = np.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant')
|
||||
assert_allclose(y, want)
|
||||
|
||||
def test_shift_x(self):
|
||||
# gh-9844: shifted x can change values?
|
||||
y = upfirdn([1, 1], [1.], 1, 1)
|
||||
assert_allclose(y, [1, 1]) # was [0, 1] in the issue
|
||||
y = upfirdn([1, 1], [0., 1.], 1, 1)
|
||||
assert_allclose(y, [0, 1, 1])
|
||||
|
||||
# A bunch of lengths/factors chosen because they exposed differences
|
||||
# between the "old way" and new way of computing length, and then
|
||||
# got `expected` from MATLAB
|
||||
@pytest.mark.parametrize('len_h, len_x, up, down, expected', [
|
||||
(2, 2, 5, 2, [1, 0, 0, 0]),
|
||||
(2, 3, 6, 3, [1, 0, 1, 0, 1]),
|
||||
(2, 4, 4, 3, [1, 0, 0, 0, 1]),
|
||||
(3, 2, 6, 2, [1, 0, 0, 1, 0]),
|
||||
(4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]),
|
||||
])
|
||||
def test_length_factors(self, len_h, len_x, up, down, expected):
|
||||
# gh-9844: weird factors
|
||||
h = np.zeros(len_h)
|
||||
h[0] = 1.
|
||||
x = np.ones(len_x)
|
||||
y = upfirdn(h, x, up, down)
|
||||
assert_allclose(y, expected)
|
||||
|
||||
@pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB
|
||||
(2, 5015),
|
||||
(11, 912),
|
||||
(79, 127),
|
||||
])
|
||||
def test_vs_convolve(self, down, want_len):
|
||||
# Check that up=1.0 gives same answer as convolve + slicing
|
||||
random_state = np.random.RandomState(17)
|
||||
try_types = (int, np.float32, np.complex64, float, complex)
|
||||
size = 10000
|
||||
|
||||
for dtype in try_types:
|
||||
x = random_state.randn(size).astype(dtype)
|
||||
if dtype in (np.complex64, np.complex128):
|
||||
x += 1j * random_state.randn(size)
|
||||
|
||||
h = firwin(31, 1. / down, window='hamming')
|
||||
yl = upfirdn_naive(x, h, 1, down)
|
||||
y = upfirdn(h, x, up=1, down=down)
|
||||
assert y.shape == (want_len,)
|
||||
assert yl.shape[0] == y.shape[0]
|
||||
assert_allclose(yl, y, atol=1e-7, rtol=1e-7)
|
||||
|
||||
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('h', (1., 1j))
|
||||
@pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)])
|
||||
def test_vs_naive_delta(self, x_dtype, h, up, down):
|
||||
UpFIRDnCase(up, down, h, x_dtype)()
|
||||
|
||||
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('p_max, q_max',
|
||||
list(product((10, 100), (10, 100))))
|
||||
def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max):
|
||||
tests = self._random_factors(p_max, q_max, h_dtype, x_dtype)
|
||||
for test in tests:
|
||||
test()
|
||||
|
||||
def _random_factors(self, p_max, q_max, h_dtype, x_dtype):
|
||||
n_rep = 3
|
||||
longest_h = 25
|
||||
random_state = np.random.RandomState(17)
|
||||
tests = []
|
||||
|
||||
for _ in range(n_rep):
|
||||
# Randomize the up/down factors somewhat
|
||||
p_add = q_max if p_max > q_max else 1
|
||||
q_add = p_max if q_max > p_max else 1
|
||||
p = random_state.randint(p_max) + p_add
|
||||
q = random_state.randint(q_max) + q_add
|
||||
|
||||
# Generate random FIR coefficients
|
||||
len_h = random_state.randint(longest_h) + 1
|
||||
h = np.atleast_1d(random_state.randint(len_h))
|
||||
h = h.astype(h_dtype)
|
||||
if h_dtype == complex:
|
||||
h += 1j * random_state.randint(len_h)
|
||||
|
||||
tests.append(UpFIRDnCase(p, q, h, x_dtype))
|
||||
|
||||
return tests
|
||||
|
||||
@pytest.mark.parametrize('mode', _upfirdn_modes)
|
||||
def test_extensions(self, mode):
|
||||
"""Test vs. manually computed results for modes not in numpy's pad."""
|
||||
x = np.array([1, 2, 3, 1], dtype=float)
|
||||
npre, npost = 6, 6
|
||||
y = _pad_test(x, npre=npre, npost=npost, mode=mode)
|
||||
if mode == 'antisymmetric':
|
||||
y_expected = np.asarray(
|
||||
[3, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2])
|
||||
elif mode == 'antireflect':
|
||||
y_expected = np.asarray(
|
||||
[1, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1])
|
||||
elif mode == 'smooth':
|
||||
y_expected = np.asarray(
|
||||
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11])
|
||||
elif mode == "line":
|
||||
lin_slope = (x[-1] - x[0]) / (len(x) - 1)
|
||||
left = x[0] + np.arange(-npre, 0, 1) * lin_slope
|
||||
right = x[-1] + np.arange(1, npost + 1) * lin_slope
|
||||
y_expected = np.concatenate((left, x, right))
|
||||
else:
|
||||
y_expected = np.pad(x, (npre, npost), mode=mode)
|
||||
assert_allclose(y, y_expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'size, h_len, mode, dtype',
|
||||
product(
|
||||
[8],
|
||||
[4, 5, 26], # include cases with h_len > 2*size
|
||||
_upfirdn_modes,
|
||||
[np.float32, np.float64, np.complex64, np.complex128],
|
||||
)
|
||||
)
|
||||
def test_modes(self, size, h_len, mode, dtype):
|
||||
random_state = np.random.RandomState(5)
|
||||
x = random_state.randn(size).astype(dtype)
|
||||
if dtype in (np.complex64, np.complex128):
|
||||
x += 1j * random_state.randn(size)
|
||||
h = np.arange(1, 1 + h_len, dtype=x.real.dtype)
|
||||
|
||||
y = upfirdn(h, x, up=1, down=1, mode=mode)
|
||||
# expected result: pad the input, filter with zero padding, then crop
|
||||
npad = h_len - 1
|
||||
if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']:
|
||||
# use _pad_test test function for modes not supported by np.pad.
|
||||
xpad = _pad_test(x, npre=npad, npost=npad, mode=mode)
|
||||
else:
|
||||
xpad = np.pad(x, npad, mode=mode)
|
||||
ypad = upfirdn(h, xpad, up=1, down=1, mode='constant')
|
||||
y_expected = ypad[npad:-npad]
|
||||
|
||||
atol = rtol = np.finfo(dtype).eps * 1e2
|
||||
assert_allclose(y, y_expected, atol=atol, rtol=rtol)
|
||||
|
||||
|
||||
def test_output_len_long_input():
|
||||
# Regression test for gh-17375. On Windows, a large enough input
|
||||
# that should have been well within the capabilities of 64 bit integers
|
||||
# would result in a 32 bit overflow because of a bug in Cython 0.29.32.
|
||||
len_h = 1001
|
||||
in_len = 10**8
|
||||
up = 320
|
||||
down = 441
|
||||
out_len = _output_len(len_h, in_len, up, down)
|
||||
# The expected value was computed "by hand" from the formula
|
||||
# (((in_len - 1) * up + len_h) - 1) // down + 1
|
||||
assert out_len == 72562360
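# Sketch: the expected value can be re-derived from the formula quoted above,
# assuming _output_len(len_h, in_len, up, down) implements
# ceil(((in_len - 1) * up + len_h) / down):
def _output_len_formula_sketch():
    len_h, in_len, up, down = 1001, 10**8, 320, 441
    assert (((in_len - 1) * up + len_h) - 1) // down + 1 == 72562360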
|
||||
@ -0,0 +1,351 @@
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_almost_equal, assert_equal,
|
||||
assert_, assert_allclose, assert_array_equal)
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
import scipy.signal._waveforms as waveforms
|
||||
|
||||
|
||||
# These chirp_* functions are the instantaneous frequencies of the signals
|
||||
# returned by chirp().
|
||||
|
||||
def chirp_linear(t, f0, f1, t1):
|
||||
f = f0 + (f1 - f0) * t / t1
|
||||
return f
|
||||
|
||||
|
||||
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
|
||||
if vertex_zero:
|
||||
f = f0 + (f1 - f0) * t**2 / t1**2
|
||||
else:
|
||||
f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
|
||||
return f
|
||||
|
||||
|
||||
def chirp_geometric(t, f0, f1, t1):
|
||||
f = f0 * (f1/f0)**(t/t1)
|
||||
return f
|
||||
|
||||
|
||||
def chirp_hyperbolic(t, f0, f1, t1):
|
||||
f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
|
||||
return f
|
||||
|
||||
|
||||
def compute_frequency(t, theta):
|
||||
"""
|
||||
Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).
|
||||
"""
|
||||
# Assume theta and t are 1-D NumPy arrays.
|
||||
# Assume that t is uniformly spaced.
|
||||
dt = t[1] - t[0]
|
||||
f = np.diff(theta)/(2*np.pi) / dt
|
||||
tf = 0.5*(t[1:] + t[:-1])
|
||||
return tf, f
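# Minimal example (not part of the test suite): for a pure tone with phase
# theta(t) = 2*pi*f0*t the finite-difference estimate above recovers the
# constant frequency f0 at the midpoints tf.
def _compute_frequency_example():
    t = np.linspace(0, 1, 101)
    f0 = 5.0
    tf, f = compute_frequency(t, 2 * np.pi * f0 * t)
    assert np.allclose(f, f0)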
|
||||
|
||||
|
||||
class TestChirp:
|
||||
|
||||
def test_linear_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_linear_freq_01(self):
|
||||
method = 'linear'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_linear_freq_02(self):
|
||||
method = 'linear'
|
||||
f0 = 200.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_quadratic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_quadratic_at_zero2(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
|
||||
vertex_zero=False)
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_quadratic_freq_01(self):
|
||||
method = 'quadratic'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 2000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_quadratic_freq_02(self):
|
||||
method = 'quadratic'
|
||||
f0 = 20.0
|
||||
f1 = 10.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 2000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_logarithmic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_logarithmic_freq_01(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_logarithmic_freq_02(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 200.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_logarithmic_freq_03(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 100.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_hyperbolic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_hyperbolic_freq_01(self):
|
||||
method = 'hyperbolic'
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
# f0 f1
|
||||
cases = [[10.0, 1.0],
|
||||
[1.0, 10.0],
|
||||
[-10.0, -1.0],
|
||||
[-1.0, -10.0]]
|
||||
for f0, f1 in cases:
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = chirp_hyperbolic(tf, f0, f1, t1)
|
||||
assert_allclose(f, expected)
|
||||
|
||||
def test_hyperbolic_zero_freq(self):
|
||||
# f0=0 or f1=0 must raise a ValueError.
|
||||
method = 'hyperbolic'
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 5)
|
||||
assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
|
||||
assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
|
||||
|
||||
def test_unknown_method(self):
|
||||
method = "foo"
|
||||
f0 = 10.0
|
||||
f1 = 20.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10)
|
||||
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
|
||||
|
||||
def test_integer_t1(self):
|
||||
f0 = 10.0
|
||||
f1 = 20.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
t1 = 3.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
t1 = 3
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 't1=3' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_f0(self):
|
||||
f1 = 20.0
|
||||
t1 = 3.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
f0 = 10.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
f0 = 10
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f0=10' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_f1(self):
|
||||
f0 = 10.0
|
||||
t1 = 3.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
f1 = 20.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
f1 = 20
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f1=20' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_all(self):
|
||||
f0 = 10
|
||||
t1 = 3
|
||||
f1 = 20
|
||||
t = np.linspace(-1, 1, 11)
|
||||
float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
|
||||
class TestSweepPoly:
|
||||
|
||||
def test_sweep_poly_quad1(self):
|
||||
p = np.poly1d([1.0, 0.0, 1.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_const(self):
|
||||
p = np.poly1d(2.0)
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_linear(self):
|
||||
p = np.poly1d([-1.0, 10.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_quad2(self):
|
||||
p = np.poly1d([1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_cubic(self):
|
||||
p = np.poly1d([2.0, 1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_cubic2(self):
|
||||
"""Use an array of coefficients instead of a poly1d."""
|
||||
p = np.array([2.0, 1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = np.poly1d(p)(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_cubic3(self):
|
||||
"""Use a list of coefficients instead of a poly1d."""
|
||||
p = [2.0, 1.0, 0.0, -2.0]
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = np.poly1d(p)(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
|
||||
class TestGaussPulse:
|
||||
|
||||
def test_integer_fc(self):
|
||||
float_result = waveforms.gausspulse('cutoff', fc=1000.0)
|
||||
int_result = waveforms.gausspulse('cutoff', fc=1000)
|
||||
err_msg = "Integer input 'fc=1000' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_bw(self):
|
||||
float_result = waveforms.gausspulse('cutoff', bw=1.0)
|
||||
int_result = waveforms.gausspulse('cutoff', bw=1)
|
||||
err_msg = "Integer input 'bw=1' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_bwr(self):
|
||||
float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
|
||||
int_result = waveforms.gausspulse('cutoff', bwr=-6)
|
||||
err_msg = "Integer input 'bwr=-6' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_tpr(self):
|
||||
float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
|
||||
int_result = waveforms.gausspulse('cutoff', tpr=-60)
|
||||
err_msg = "Integer input 'tpr=-60' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
|
||||
class TestUnitImpulse:
|
||||
|
||||
def test_no_index(self):
|
||||
assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
|
||||
assert_array_equal(waveforms.unit_impulse((3, 3)),
|
||||
[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
|
||||
|
||||
def test_index(self):
|
||||
assert_array_equal(waveforms.unit_impulse(10, 3),
|
||||
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
|
||||
assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)),
|
||||
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
|
||||
|
||||
# Broadcasting
|
||||
imp = waveforms.unit_impulse((4, 4), 2)
|
||||
assert_array_equal(imp, np.array([[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 0]]))
|
||||
|
||||
def test_mid(self):
|
||||
assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'),
|
||||
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
|
||||
assert_array_equal(waveforms.unit_impulse(9, 'mid'),
|
||||
[0, 0, 0, 0, 1, 0, 0, 0, 0])
|
||||
|
||||
def test_dtype(self):
|
||||
imp = waveforms.unit_impulse(7)
|
||||
assert_(np.issubdtype(imp.dtype, np.floating))
|
||||
|
||||
imp = waveforms.unit_impulse(5, 3, dtype=int)
|
||||
assert_(np.issubdtype(imp.dtype, np.integer))
|
||||
|
||||
imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
|
||||
assert_(np.issubdtype(imp.dtype, np.complexfloating))
|
||||
@ -0,0 +1,161 @@
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_equal,
|
||||
assert_array_equal, assert_array_almost_equal, assert_array_less, assert_,)
|
||||
import pytest
|
||||
|
||||
import scipy.signal._wavelets as wavelets
|
||||
|
||||
|
||||
class TestWavelets:
|
||||
def test_qmf(self):
|
||||
with pytest.deprecated_call():
|
||||
assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
|
||||
|
||||
def test_daub(self):
|
||||
with pytest.deprecated_call():
|
||||
for i in range(1, 15):
|
||||
assert_equal(len(wavelets.daub(i)), i * 2)
|
||||
|
||||
def test_cascade(self):
|
||||
with pytest.deprecated_call():
|
||||
for J in range(1, 7):
|
||||
for i in range(1, 5):
|
||||
lpcoef = wavelets.daub(i)
|
||||
k = len(lpcoef)
|
||||
x, phi, psi = wavelets.cascade(lpcoef, J)
|
||||
assert_(len(x) == len(phi) == len(psi))
|
||||
assert_equal(len(x), (k - 1) * 2 ** J)
|
||||
|
||||
def test_morlet(self):
|
||||
with pytest.deprecated_call():
|
||||
x = wavelets.morlet(50, 4.1, complete=True)
|
||||
y = wavelets.morlet(50, 4.1, complete=False)
|
||||
# Test if complete and incomplete wavelet have same lengths:
|
||||
assert_equal(len(x), len(y))
|
||||
# Test if complete wavelet is less than incomplete wavelet:
|
||||
assert_array_less(x, y)
|
||||
|
||||
x = wavelets.morlet(10, 50, complete=False)
|
||||
y = wavelets.morlet(10, 50, complete=True)
|
||||
# For large widths complete and incomplete wavelets should be
|
||||
# identical within numerical precision:
|
||||
assert_equal(x, y)
|
||||
|
||||
# miscellaneous tests:
|
||||
x = np.array([1.73752399e-09 + 9.84327394e-25j,
|
||||
6.49471756e-01 + 0.00000000e+00j,
|
||||
1.73752399e-09 - 9.84327394e-25j])
|
||||
y = wavelets.morlet(3, w=2, complete=True)
|
||||
assert_array_almost_equal(x, y)
|
||||
|
||||
x = np.array([2.00947715e-09 + 9.84327394e-25j,
|
||||
7.51125544e-01 + 0.00000000e+00j,
|
||||
2.00947715e-09 - 9.84327394e-25j])
|
||||
y = wavelets.morlet(3, w=2, complete=False)
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, s=4, complete=True)
|
||||
y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, s=4, complete=False)
|
||||
assert_array_almost_equal(y, x, decimal=2)
|
||||
y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=3, s=5, complete=True)
|
||||
y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=3, s=5, complete=False)
|
||||
assert_array_almost_equal(y, x, decimal=2)
|
||||
y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=7, s=10, complete=True)
|
||||
y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=7, s=10, complete=False)
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
def test_morlet2(self):
|
||||
with pytest.deprecated_call():
|
||||
w = wavelets.morlet2(1.0, 0.5)
|
||||
expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex)
|
||||
assert_array_equal(w, expected)
|
||||
|
||||
lengths = [5, 11, 15, 51, 101]
|
||||
for length in lengths:
|
||||
w = wavelets.morlet2(length, 1.0)
|
||||
assert_(len(w) == length)
|
||||
max_loc = np.argmax(w)
|
||||
assert_(max_loc == (length // 2))
|
||||
|
||||
points = 100
|
||||
w = abs(wavelets.morlet2(points, 2.0))
|
||||
half_vec = np.arange(0, points // 2)
|
||||
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
|
||||
|
||||
x = np.array([5.03701224e-09 + 2.46742437e-24j,
|
||||
1.88279253e+00 + 0.00000000e+00j,
|
||||
5.03701224e-09 - 2.46742437e-24j])
|
||||
y = wavelets.morlet2(3, s=1/(2*np.pi), w=2)
|
||||
assert_array_almost_equal(x, y)
|
||||
|
||||
def test_ricker(self):
|
||||
with pytest.deprecated_call():
|
||||
w = wavelets.ricker(1.0, 1)
|
||||
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
|
||||
assert_array_equal(w, expected)
|
||||
|
||||
lengths = [5, 11, 15, 51, 101]
|
||||
for length in lengths:
|
||||
w = wavelets.ricker(length, 1.0)
|
||||
assert_(len(w) == length)
|
||||
max_loc = np.argmax(w)
|
||||
assert_(max_loc == (length // 2))
|
||||
|
||||
points = 100
|
||||
w = wavelets.ricker(points, 2.0)
|
||||
half_vec = np.arange(0, points // 2)
|
||||
# Wavelet should be symmetric
|
||||
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
|
||||
|
||||
# Check zeros
|
||||
aas = [5, 10, 15, 20, 30]
|
||||
points = 99
|
||||
for a in aas:
|
||||
w = wavelets.ricker(points, a)
|
||||
vec = np.arange(0, points) - (points - 1.0) / 2
|
||||
exp_zero1 = np.argmin(np.abs(vec - a))
|
||||
exp_zero2 = np.argmin(np.abs(vec + a))
|
||||
assert_array_almost_equal(w[exp_zero1], 0)
|
||||
assert_array_almost_equal(w[exp_zero2], 0)
|
||||
|
||||
def test_cwt(self):
|
||||
with pytest.deprecated_call():
|
||||
widths = [1.0]
|
||||
def delta_wavelet(s, t):
|
||||
return np.array([1])
|
||||
len_data = 100
|
||||
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
|
||||
|
||||
# Test delta function input gives same data as output
|
||||
cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
|
||||
assert_(cwt_dat.shape == (len(widths), len_data))
|
||||
assert_array_almost_equal(test_data, cwt_dat.flatten())
|
||||
|
||||
# Check proper shape on output
|
||||
widths = [1, 3, 4, 5, 10]
|
||||
cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
|
||||
assert_(cwt_dat.shape == (len(widths), len_data))
|
||||
|
||||
widths = [len_data * 10]
|
||||
# Note: this wavelet isn't defined quite right, but is fine for this test
|
||||
def flat_wavelet(l, w):
|
||||
return np.full(w, 1 / w)
|
||||
cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
|
||||
assert_array_almost_equal(cwt_dat, np.mean(test_data))
|
||||
File diff suppressed because one or more lines are too long