asd
This commit is contained in:
@ -0,0 +1,215 @@
|
||||
import pytest
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_allclose
|
||||
|
||||
from scipy.integrate import quad_vec
|
||||
|
||||
from multiprocessing.dummy import Pool
|
||||
|
||||
|
||||
# Shared decorator: run the test once per supported quadrature rule.
quadrature_params = pytest.mark.parametrize(
    'quadrature', [None, "gk15", "gk21", "trapezoid"])
|
||||
|
||||
|
||||
@quadrature_params
|
||||
def test_quad_vec_simple(quadrature):
|
||||
n = np.arange(10)
|
||||
def f(x):
|
||||
return x ** n
|
||||
for epsabs in [0.1, 1e-3, 1e-6]:
|
||||
if quadrature == 'trapezoid' and epsabs < 1e-4:
|
||||
# slow: skip
|
||||
continue
|
||||
|
||||
kwargs = dict(epsabs=epsabs, quadrature=quadrature)
|
||||
|
||||
exact = 2**(n+1)/(n + 1)
|
||||
|
||||
res, err = quad_vec(f, 0, 2, norm='max', **kwargs)
|
||||
assert_allclose(res, exact, rtol=0, atol=epsabs)
|
||||
|
||||
res, err = quad_vec(f, 0, 2, norm='2', **kwargs)
|
||||
assert np.linalg.norm(res - exact) < epsabs
|
||||
|
||||
res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs)
|
||||
assert_allclose(res, exact, rtol=0, atol=epsabs)
|
||||
|
||||
res, err, *rest = quad_vec(f, 0, 2, norm='max',
|
||||
epsrel=1e-8,
|
||||
full_output=True,
|
||||
limit=10000,
|
||||
**kwargs)
|
||||
assert_allclose(res, exact, rtol=0, atol=epsabs)
|
||||
|
||||
|
||||
@quadrature_params
|
||||
def test_quad_vec_simple_inf(quadrature):
|
||||
def f(x):
|
||||
return 1 / (1 + np.float64(x) ** 2)
|
||||
|
||||
for epsabs in [0.1, 1e-3, 1e-6]:
|
||||
if quadrature == 'trapezoid' and epsabs < 1e-4:
|
||||
# slow: skip
|
||||
continue
|
||||
|
||||
kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
|
||||
|
||||
res, err = quad_vec(f, 0, np.inf, **kwargs)
|
||||
assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, 0, -np.inf, **kwargs)
|
||||
assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, -np.inf, 0, **kwargs)
|
||||
assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, np.inf, 0, **kwargs)
|
||||
assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, -np.inf, np.inf, **kwargs)
|
||||
assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, np.inf, -np.inf, **kwargs)
|
||||
assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, np.inf, np.inf, **kwargs)
|
||||
assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, -np.inf, -np.inf, **kwargs)
|
||||
assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
|
||||
assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
|
||||
|
||||
def f(x):
|
||||
return np.sin(x + 2) / (1 + x ** 2)
|
||||
exact = np.pi / np.e * np.sin(2)
|
||||
epsabs = 1e-5
|
||||
|
||||
res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs,
|
||||
quadrature=quadrature, full_output=True)
|
||||
assert info.status == 1
|
||||
assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err))
|
||||
|
||||
|
||||
def test_quad_vec_args():
    """Extra positional arguments are forwarded to the integrand via ``args``."""
    def integrand(x, shift):
        return x * (x + shift) * np.arange(3)

    # integral_0^1 x*(x + 2) dx = 1/3 + 1 = 4/3, scaled by [0, 1, 2]
    expected = np.array([0, 4/3, 8/3])

    res, err = quad_vec(integrand, 0, 1, args=(2,))
    assert_allclose(res, expected, rtol=0, atol=1e-4)
|
||||
|
||||
|
||||
def _lorenzian(x):
    """Standard Lorentzian profile, 1 / (1 + x**2)."""
    return 1 / (1 + x ** 2)
|
||||
|
||||
|
||||
@pytest.mark.fail_slow(5)
|
||||
def test_quad_vec_pool():
|
||||
f = _lorenzian
|
||||
res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4)
|
||||
assert_allclose(res, np.pi, rtol=0, atol=1e-4)
|
||||
|
||||
with Pool(10) as pool:
|
||||
def f(x):
|
||||
return 1 / (1 + x ** 2)
|
||||
res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map)
|
||||
assert_allclose(res, np.pi, rtol=0, atol=1e-4)
|
||||
|
||||
|
||||
def _func_with_args(x, a):
    """Parametrized integrand returning a length-3 vector."""
    scalar_part = x * (x + a)
    return scalar_part * np.arange(3)
|
||||
|
||||
|
||||
@pytest.mark.fail_slow(5)
|
||||
@pytest.mark.parametrize('extra_args', [2, (2,)])
|
||||
@pytest.mark.parametrize('workers', [1, 10])
|
||||
def test_quad_vec_pool_args(extra_args, workers):
|
||||
f = _func_with_args
|
||||
exact = np.array([0, 4/3, 8/3])
|
||||
|
||||
res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers)
|
||||
assert_allclose(res, exact, rtol=0, atol=1e-4)
|
||||
|
||||
with Pool(workers) as pool:
|
||||
res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map)
|
||||
assert_allclose(res, exact, rtol=0, atol=1e-4)
|
||||
|
||||
|
||||
@quadrature_params
|
||||
def test_num_eval(quadrature):
|
||||
def f(x):
|
||||
count[0] += 1
|
||||
return x**5
|
||||
|
||||
count = [0]
|
||||
res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature)
|
||||
assert res[2].neval == count[0]
|
||||
|
||||
|
||||
def test_info():
|
||||
def f(x):
|
||||
return np.ones((3, 2, 1))
|
||||
|
||||
res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True)
|
||||
|
||||
assert info.success is True
|
||||
assert info.status == 0
|
||||
assert info.message == 'Target precision reached.'
|
||||
assert info.neval > 0
|
||||
assert info.intervals.shape[1] == 2
|
||||
assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1)
|
||||
assert info.errors.shape == (info.intervals.shape[0],)
|
||||
|
||||
|
||||
def test_nan_inf():
    """Integrands producing nan or inf must make quad_vec report status 3."""
    def returns_nan(x):
        return np.nan

    def blows_up(x):
        # infinite near the left endpoint, 1/x elsewhere
        return np.inf if x < 0.1 else 1/x

    for bad_integrand in (returns_nan, blows_up):
        res, err, info = quad_vec(bad_integrand, 0, 1, full_output=True)
        assert info.status == 3
|
||||
|
||||
|
||||
@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0),
|
||||
(-np.inf, np.inf), (np.inf, -np.inf)])
|
||||
def test_points(a, b):
|
||||
# Check that initial interval splitting is done according to
|
||||
# `points`, by checking that consecutive sets of 15 point (for
|
||||
# gk15) function evaluations lie between `points`
|
||||
|
||||
points = (0, 0.25, 0.5, 0.75, 1.0)
|
||||
points += tuple(-x for x in points)
|
||||
|
||||
quadrature_points = 15
|
||||
interval_sets = []
|
||||
count = 0
|
||||
|
||||
def f(x):
|
||||
nonlocal count
|
||||
|
||||
if count % quadrature_points == 0:
|
||||
interval_sets.append(set())
|
||||
|
||||
count += 1
|
||||
interval_sets[-1].add(float(x))
|
||||
return 0.0
|
||||
|
||||
quad_vec(f, a, b, points=points, quadrature='gk15', limit=0)
|
||||
|
||||
# Check that all point sets lie in a single `points` interval
|
||||
for p in interval_sets:
|
||||
j = np.searchsorted(sorted(points), tuple(p))
|
||||
assert np.all(j == j[0])
|
||||
|
||||
def test_trapz_deprecation():
    """``quadrature='trapz'`` is a deprecated alias for ``'trapezoid'``."""
    with pytest.deprecated_call(match="`quadrature='trapz'`"):
        quad_vec(lambda x: x, 0, 1, quadrature="trapz")
|
||||
@ -0,0 +1,218 @@
|
||||
import itertools
|
||||
import numpy as np
|
||||
from numpy.testing import assert_allclose
|
||||
from scipy.integrate import ode
|
||||
|
||||
|
||||
def _band_count(a):
    """Return (ml, mu), the lower and upper band sizes of the 2-D array a."""
    nrows, _ = a.shape
    # widest nonzero sub-diagonal first (most negative offset first)
    sub = (-k for k in range(-nrows + 1, 0) if np.diag(a, k).any())
    # widest nonzero super-diagonal first (largest offset first)
    sup = (k for k in range(nrows - 1, 0, -1) if np.diag(a, k).any())
    ml = next(sub, 0)
    mu = next(sup, 0)
    return ml, mu
|
||||
|
||||
|
||||
def _linear_func(t, y, a):
    """Right-hand side of the linear system dy/dt = a @ y."""
    return np.dot(a, y)
|
||||
|
||||
|
||||
def _linear_jac(t, y, a):
    """Jacobian of dy/dt = a @ y is the constant matrix a itself."""
    return a
|
||||
|
||||
|
||||
def _linear_banded_jac(t, y, a):
    """Jacobian of dy/dt = a @ y, packed in banded (LAPACK-style) layout."""
    ml, mu = _band_count(a)
    rows = []
    # super-diagonals, widest first, left-padded with zeros
    for k in range(mu, 0, -1):
        rows.append(np.r_[[0] * k, np.diag(a, k)])
    # main diagonal
    rows.append(np.diag(a))
    # sub-diagonals, right-padded with zeros
    for k in range(-1, -ml - 1, -1):
        rows.append(np.r_[np.diag(a, k), [0] * (-k)])
    return rows
|
||||
|
||||
|
||||
def _solve_linear_sys(a, y0, tend=1, dt=0.1,
|
||||
solver=None, method='bdf', use_jac=True,
|
||||
with_jacobian=False, banded=False):
|
||||
"""Use scipy.integrate.ode to solve a linear system of ODEs.
|
||||
|
||||
a : square ndarray
|
||||
Matrix of the linear system to be solved.
|
||||
y0 : ndarray
|
||||
Initial condition
|
||||
tend : float
|
||||
Stop time.
|
||||
dt : float
|
||||
Step size of the output.
|
||||
solver : str
|
||||
If not None, this must be "vode", "lsoda" or "zvode".
|
||||
method : str
|
||||
Either "bdf" or "adams".
|
||||
use_jac : bool
|
||||
Determines if the jacobian function is passed to ode().
|
||||
with_jacobian : bool
|
||||
Passed to ode.set_integrator().
|
||||
banded : bool
|
||||
Determines whether a banded or full jacobian is used.
|
||||
If `banded` is True, `lband` and `uband` are determined by the
|
||||
values in `a`.
|
||||
"""
|
||||
if banded:
|
||||
lband, uband = _band_count(a)
|
||||
else:
|
||||
lband = None
|
||||
uband = None
|
||||
|
||||
if use_jac:
|
||||
if banded:
|
||||
r = ode(_linear_func, _linear_banded_jac)
|
||||
else:
|
||||
r = ode(_linear_func, _linear_jac)
|
||||
else:
|
||||
r = ode(_linear_func)
|
||||
|
||||
if solver is None:
|
||||
if np.iscomplexobj(a):
|
||||
solver = "zvode"
|
||||
else:
|
||||
solver = "vode"
|
||||
|
||||
r.set_integrator(solver,
|
||||
with_jacobian=with_jacobian,
|
||||
method=method,
|
||||
lband=lband, uband=uband,
|
||||
rtol=1e-9, atol=1e-10,
|
||||
)
|
||||
t0 = 0
|
||||
r.set_initial_value(y0, t0)
|
||||
r.set_f_params(a)
|
||||
r.set_jac_params(a)
|
||||
|
||||
t = [t0]
|
||||
y = [y0]
|
||||
while r.successful() and r.t < tend:
|
||||
r.integrate(r.t + dt)
|
||||
t.append(r.t)
|
||||
y.append(r.y)
|
||||
|
||||
t = np.array(t)
|
||||
y = np.array(y)
|
||||
return t, y
|
||||
|
||||
|
||||
def _analytical_solution(a, y0, t):
    """Analytical solution of dy/dt = a @ y via eigendecomposition.

    Only valid when `a` is diagonalizable.  Returns a 2-D array of shape
    (len(t), len(y0)).
    """
    lam, v = np.linalg.eig(a)
    # expansion coefficients of y0 in the eigenbasis
    c = np.linalg.solve(v, y0)
    modes = c * np.exp(lam * t.reshape(-1, 1))
    return modes.dot(v.T)
|
||||
|
||||
|
||||
def test_banded_ode_solvers():
|
||||
# Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
|
||||
# with a system that has a banded Jacobian matrix.
|
||||
|
||||
t_exact = np.linspace(0, 1.0, 5)
|
||||
|
||||
# --- Real arrays for testing the "lsoda" and "vode" solvers ---
|
||||
|
||||
# lband = 2, uband = 1:
|
||||
a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
|
||||
[0.2, -0.5, 0.9, 0.0, 0.0],
|
||||
[0.1, 0.1, -0.4, 0.1, 0.0],
|
||||
[0.0, 0.3, -0.1, -0.9, -0.3],
|
||||
[0.0, 0.0, 0.1, 0.1, -0.7]])
|
||||
|
||||
# lband = 0, uband = 1:
|
||||
a_real_upper = np.triu(a_real)
|
||||
|
||||
# lband = 2, uband = 0:
|
||||
a_real_lower = np.tril(a_real)
|
||||
|
||||
# lband = 0, uband = 0:
|
||||
a_real_diag = np.triu(a_real_lower)
|
||||
|
||||
real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
|
||||
real_solutions = []
|
||||
|
||||
for a in real_matrices:
|
||||
y0 = np.arange(1, a.shape[0] + 1)
|
||||
y_exact = _analytical_solution(a, y0, t_exact)
|
||||
real_solutions.append((y0, t_exact, y_exact))
|
||||
|
||||
def check_real(idx, solver, meth, use_jac, with_jac, banded):
|
||||
a = real_matrices[idx]
|
||||
y0, t_exact, y_exact = real_solutions[idx]
|
||||
t, y = _solve_linear_sys(a, y0,
|
||||
tend=t_exact[-1],
|
||||
dt=t_exact[1] - t_exact[0],
|
||||
solver=solver,
|
||||
method=meth,
|
||||
use_jac=use_jac,
|
||||
with_jacobian=with_jac,
|
||||
banded=banded)
|
||||
assert_allclose(t, t_exact)
|
||||
assert_allclose(y, y_exact)
|
||||
|
||||
for idx in range(len(real_matrices)):
|
||||
p = [['vode', 'lsoda'], # solver
|
||||
['bdf', 'adams'], # method
|
||||
[False, True], # use_jac
|
||||
[False, True], # with_jacobian
|
||||
[False, True]] # banded
|
||||
for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
|
||||
check_real(idx, solver, meth, use_jac, with_jac, banded)
|
||||
|
||||
# --- Complex arrays for testing the "zvode" solver ---
|
||||
|
||||
# complex, lband = 2, uband = 1:
|
||||
a_complex = a_real - 0.5j * a_real
|
||||
|
||||
# complex, lband = 0, uband = 0:
|
||||
a_complex_diag = np.diag(np.diag(a_complex))
|
||||
|
||||
complex_matrices = [a_complex, a_complex_diag]
|
||||
complex_solutions = []
|
||||
|
||||
for a in complex_matrices:
|
||||
y0 = np.arange(1, a.shape[0] + 1) + 1j
|
||||
y_exact = _analytical_solution(a, y0, t_exact)
|
||||
complex_solutions.append((y0, t_exact, y_exact))
|
||||
|
||||
def check_complex(idx, solver, meth, use_jac, with_jac, banded):
|
||||
a = complex_matrices[idx]
|
||||
y0, t_exact, y_exact = complex_solutions[idx]
|
||||
t, y = _solve_linear_sys(a, y0,
|
||||
tend=t_exact[-1],
|
||||
dt=t_exact[1] - t_exact[0],
|
||||
solver=solver,
|
||||
method=meth,
|
||||
use_jac=use_jac,
|
||||
with_jacobian=with_jac,
|
||||
banded=banded)
|
||||
assert_allclose(t, t_exact)
|
||||
assert_allclose(y, y_exact)
|
||||
|
||||
for idx in range(len(complex_matrices)):
|
||||
p = [['bdf', 'adams'], # method
|
||||
[False, True], # use_jac
|
||||
[False, True], # with_jacobian
|
||||
[False, True]] # banded
|
||||
for meth, use_jac, with_jac, banded in itertools.product(*p):
|
||||
check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
|
||||
@ -0,0 +1,711 @@
|
||||
import sys
|
||||
|
||||
try:
|
||||
from StringIO import StringIO
|
||||
except ImportError:
|
||||
from io import StringIO
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_, assert_array_equal, assert_allclose,
|
||||
assert_equal)
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from scipy.sparse import coo_matrix
|
||||
from scipy.special import erf
|
||||
from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
|
||||
estimate_bc_jac, compute_jac_indices,
|
||||
construct_global_jac, solve_bvp)
|
||||
|
||||
|
||||
def exp_fun(x, y):
    """RHS of y'' = y written as a first-order system: (y0, y1)' = (y1, y0)."""
    dy0, dy1 = y[1], y[0]
    return np.vstack((dy0, dy1))
|
||||
|
||||
|
||||
def exp_fun_jac(x, y):
    """Analytical Jacobian of exp_fun: the constant matrix [[0, 1], [1, 0]]."""
    df_dy = np.zeros((2, 2, x.shape[0]))
    df_dy[0, 1] = 1
    df_dy[1, 0] = 1
    return df_dy
|
||||
|
||||
|
||||
def exp_bc(ya, yb):
    """Residuals of the boundary conditions y(0) = 1 and y(1) = 0."""
    left = ya[0] - 1
    right = yb[0]
    return np.hstack((left, right))
|
||||
|
||||
|
||||
def exp_bc_complex(ya, yb):
    """Complex variant of exp_bc: y(0) = 1 + 1j and y(1) = 0."""
    left = ya[0] - 1 - 1j
    right = yb[0]
    return np.hstack((left, right))
|
||||
|
||||
|
||||
def exp_bc_jac(ya, yb):
    """Analytical BC Jacobians for exp_bc (both constant matrices)."""
    dbc_dya = np.array([[1, 0],
                        [0, 0]])
    dbc_dyb = np.array([[0, 0],
                        [1, 0]])
    return dbc_dya, dbc_dyb
|
||||
|
||||
|
||||
def exp_sol(x):
    """Exact solution of y'' = y with y(0) = 1, y(1) = 0."""
    numer = np.exp(-x) - np.exp(x - 2)
    return numer / (1 - np.exp(-2))
|
||||
|
||||
|
||||
def sl_fun(x, y, p):
    """Sturm-Liouville RHS: y'' = -p[0]**2 * y as a first-order system."""
    return np.vstack((y[1], -p[0]**2 * y[0]))
|
||||
|
||||
|
||||
def sl_fun_jac(x, y, p):
    """Analytical Jacobians of sl_fun w.r.t. y and the parameter p."""
    n, m = y.shape

    df_dy = np.zeros((n, 2, m))
    df_dy[0, 1] = 1
    df_dy[1, 0] = -p[0]**2

    df_dp = np.zeros((n, 1, m))
    df_dp[1, 0] = -2 * p[0] * y[0]

    return df_dy, df_dp
|
||||
|
||||
|
||||
def sl_bc(ya, yb, p):
    """BC residuals: y(0) = 0, y(end) = 0, y'(0) = p[0]."""
    residuals = (ya[0], yb[0], ya[1] - p[0])
    return np.hstack(residuals)
|
||||
|
||||
|
||||
def sl_bc_jac(ya, yb, p):
    """Analytical BC Jacobians for sl_bc (all constant matrices)."""
    dbc_dya = np.array([[1.0, 0.0],
                        [0.0, 0.0],
                        [0.0, 1.0]])
    dbc_dyb = np.array([[0.0, 0.0],
                        [1.0, 0.0],
                        [0.0, 0.0]])
    dbc_dp = np.array([[0.0],
                       [0.0],
                       [-1.0]])
    return dbc_dya, dbc_dyb, dbc_dp
|
||||
|
||||
|
||||
def sl_sol(x, p):
    """Exact eigenfunction sin(p[0] * x)."""
    return np.sin(p[0] * x)
|
||||
|
||||
|
||||
def emden_fun(x, y):
    """Emden equation y'' = -y**5 as a first-order system."""
    dy0 = y[1]
    dy1 = -y[0]**5
    return np.vstack((dy0, dy1))
|
||||
|
||||
|
||||
def emden_fun_jac(x, y):
    """Analytical Jacobian of emden_fun."""
    df_dy = np.zeros((2, 2, x.shape[0]))
    df_dy[0, 1] = 1
    df_dy[1, 0] = -5 * y[0]**4
    return df_dy
|
||||
|
||||
|
||||
def emden_bc(ya, yb):
    """BC residuals: y'(0) = 0 and y(end) = sqrt(3/4)."""
    return np.array([ya[1], yb[0] - (3/4)**0.5])
|
||||
|
||||
|
||||
def emden_bc_jac(ya, yb):
    """Analytical BC Jacobians for emden_bc (constant matrices)."""
    dbc_dya = np.zeros((2, 2), dtype=int)
    dbc_dya[0, 1] = 1
    dbc_dyb = np.zeros((2, 2), dtype=int)
    dbc_dyb[1, 0] = 1
    return dbc_dya, dbc_dyb
|
||||
|
||||
|
||||
def emden_sol(x):
    """Closed-form Emden solution (1 + x**2/3) ** -0.5."""
    base = 1 + x**2/3
    return base**-0.5
|
||||
|
||||
|
||||
def undefined_fun(x, y):
    """Degenerate RHS y' = 0 (problem is singular with undefined_bc)."""
    return np.zeros_like(y)
|
||||
|
||||
|
||||
def undefined_bc(ya, yb):
    """BC residuals y(0) = 0, y(1) = 1 — unsatisfiable for undefined_fun."""
    return np.array([ya[0], yb[0] - 1])
|
||||
|
||||
|
||||
def big_fun(x, y):
    """Many stacked copies of the trivial system y0' = y1, y1' = 0."""
    deriv = np.zeros_like(y)
    deriv[0::2] = y[1::2]
    return deriv
|
||||
|
||||
|
||||
def big_bc(ya, yb):
    """Even components start at 0 and end at 1."""
    start = ya[::2]
    end = yb[::2] - 1
    return np.hstack((start, end))
|
||||
|
||||
|
||||
def big_sol(x, n):
    """First component of the exact solution of the big test problem.

    With y0' = y1, y1' = 0 and the BCs from big_bc, every even component of
    the exact solution equals ``x`` and every odd component equals 1.  The
    caller (test_big_problem) only compares component 0, so ``x`` itself is
    the expected value.  ``n`` is kept for signature compatibility.

    The original body also constructed the full ``(2*n, x.size)`` solution
    array but never returned it; that dead code is removed here.
    """
    return x
|
||||
|
||||
|
||||
def big_fun_with_parameters(x, y, p):
    """Big version of sl_fun, with two parameters.

    The two sl_fun equations are broadcast down the rows of y, alternating
    between the frequencies p[0] and p[1]:

        dy[0]/dt = y[1]          dy[1]/dt = -p[0]**2 * y[0]
        dy[2]/dt = y[3]          dy[3]/dt = -p[1]**2 * y[2]
        dy[4]/dt = y[5]          dy[5]/dt = -p[0]**2 * y[4]
        dy[6]/dt = y[7]          dy[7]/dt = -p[1]**2 * y[6]
        ...
    """
    f = np.zeros_like(y)
    f[0::2] = y[1::2]
    f[1::4] = -p[0]**2 * y[0::4]
    f[3::4] = -p[1]**2 * y[2::4]
    return f
|
||||
|
||||
|
||||
def big_fun_with_parameters_jac(x, y, p):
    """Analytical Jacobians of big_fun_with_parameters w.r.t. y and p."""
    n, m = y.shape
    rows_p0 = np.arange(1, n, 4)   # rows driven by p[0]
    rows_p1 = np.arange(3, n, 4)   # rows driven by p[1]

    df_dy = np.zeros((n, n, m))
    df_dy[np.arange(0, n, 2), np.arange(1, n, 2)] = 1
    df_dy[rows_p0, rows_p0 - 1] = -p[0]**2
    df_dy[rows_p1, rows_p1 - 1] = -p[1]**2

    df_dp = np.zeros((n, 2, m))
    df_dp[rows_p0, 0] = -2 * p[0] * y[rows_p0 - 1]
    df_dp[rows_p1, 1] = -2 * p[1] * y[rows_p1 - 1]

    return df_dy, df_dp
|
||||
|
||||
|
||||
def big_bc_with_parameters(ya, yb, p):
    """Big version of sl_bc: even components pinned at both ends, plus
    normalization conditions fixing the two parameters."""
    residuals = (ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1])
    return np.hstack(residuals)
|
||||
|
||||
|
||||
def big_bc_with_parameters_jac(ya, yb, p):
    """Analytical BC Jacobians for big_bc_with_parameters."""
    n = ya.shape[0]
    half = n // 2

    dbc_dya = np.zeros((n + 2, n))
    dbc_dyb = np.zeros((n + 2, n))

    # first n rows: even components at both ends
    dbc_dya[range(half), range(0, n, 2)] = 1
    dbc_dyb[range(half, n), range(0, n, 2)] = 1

    # last two rows: parameter normalization conditions
    dbc_dp = np.zeros((n + 2, 2))
    dbc_dp[n, 0] = -1
    dbc_dya[n, 1] = 1
    dbc_dp[n + 1, 1] = -1
    dbc_dya[n + 1, 3] = 1

    return dbc_dya, dbc_dyb, dbc_dp
|
||||
|
||||
|
||||
def big_sol_with_parameters(x, p):
    """Big version of sl_sol: the two distinct solution modes."""
    mode0 = np.sin(p[0] * x)
    mode1 = np.sin(p[1] * x)
    return np.vstack((mode0, mode1))
|
||||
|
||||
|
||||
def shock_fun(x, y):
    """RHS of the singularly perturbed problem
    eps*y'' + x*y' = -eps*pi**2*cos(pi*x) - pi*x*sin(pi*x), eps = 1e-3."""
    eps = 1e-3
    dy1 = -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
            np.pi * x * np.sin(np.pi * x)) / eps
    return np.vstack((y[1], dy1))
|
||||
|
||||
|
||||
def shock_bc(ya, yb):
    """BC residuals: y(-1) = -2 and y(1) = 0."""
    return np.array([ya[0] + 2, yb[0]])
|
||||
|
||||
|
||||
def shock_sol(x):
    """Asymptotic solution: cos(pi*x) plus an erf transition layer of
    width sqrt(2*eps) centred at x = 0."""
    eps = 1e-3
    width = np.sqrt(2 * eps)
    layer = erf(x / width) / erf(1 / width)
    return np.cos(np.pi * x) + layer
|
||||
|
||||
|
||||
def nonlin_bc_fun(x, y):
    """Laplace equation phi'' = 0 written as a first-order system."""
    zero = np.zeros_like(x)
    return np.stack([y[1], zero])
|
||||
|
||||
|
||||
def nonlin_bc_bc(ya, yb):
    """Nonlinear Butler-Volmer boundary conditions at the two electrodes."""
    kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9

    def bv_current(io, overpotential):
        # symmetric Butler-Volmer current for the given overpotential
        return io * (np.exp(f * overpotential) - np.exp(-f * overpotential))

    phiA, phipA = ya
    phiC, phipC = yb

    # anode: equilibrium potential 0.0
    res0 = bv_current(ioA, 0.0 - phiA - 0.0) + kappa * phipA
    # cathode: equilibrium potential 1.0, applied voltage V
    res1 = bv_current(ioC, V - phiC - 1.0) - kappa * phipC

    return np.array([res0, res1])
|
||||
|
||||
|
||||
def nonlin_bc_sol(x):
    """Linear reference solution of the nonlinear-BC problem."""
    return -0.13426436116763119 - 1.1308709 * x
|
||||
|
||||
|
||||
def test_modify_mesh():
    """modify_mesh splits each marked interval into 2 (first set) or 3
    (second set) subintervals."""
    mesh = np.array([0, 1, 3, 9], dtype=float)
    refined = modify_mesh(mesh, np.array([0]), np.array([2]))
    assert_array_equal(refined, np.array([0, 0.5, 1, 3, 5, 7, 9]))

    mesh = np.array([-6, -3, 0, 3, 6], dtype=float)
    refined = modify_mesh(mesh, np.array([1], dtype=int), np.array([0, 2, 3]))
    assert_array_equal(refined, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
|
||||
|
||||
|
||||
def test_compute_fun_jac():
|
||||
x = np.linspace(0, 1, 5)
|
||||
y = np.empty((2, x.shape[0]))
|
||||
y[0] = 0.01
|
||||
y[1] = 0.02
|
||||
p = np.array([])
|
||||
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
|
||||
df_dy_an = exp_fun_jac(x, y)
|
||||
assert_allclose(df_dy, df_dy_an)
|
||||
assert_(df_dp is None)
|
||||
|
||||
x = np.linspace(0, np.pi, 5)
|
||||
y = np.empty((2, x.shape[0]))
|
||||
y[0] = np.sin(x)
|
||||
y[1] = np.cos(x)
|
||||
p = np.array([1.0])
|
||||
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
|
||||
df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
|
||||
assert_allclose(df_dy, df_dy_an)
|
||||
assert_allclose(df_dp, df_dp_an)
|
||||
|
||||
x = np.linspace(0, 1, 10)
|
||||
y = np.empty((2, x.shape[0]))
|
||||
y[0] = (3/4)**0.5
|
||||
y[1] = 1e-4
|
||||
p = np.array([])
|
||||
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
|
||||
df_dy_an = emden_fun_jac(x, y)
|
||||
assert_allclose(df_dy, df_dy_an)
|
||||
assert_(df_dp is None)
|
||||
|
||||
|
||||
def test_compute_bc_jac():
|
||||
ya = np.array([-1.0, 2])
|
||||
yb = np.array([0.5, 3])
|
||||
p = np.array([])
|
||||
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
|
||||
lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
|
||||
dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
|
||||
assert_allclose(dbc_dya, dbc_dya_an)
|
||||
assert_allclose(dbc_dyb, dbc_dyb_an)
|
||||
assert_(dbc_dp is None)
|
||||
|
||||
ya = np.array([0.0, 1])
|
||||
yb = np.array([0.0, -1])
|
||||
p = np.array([0.5])
|
||||
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
|
||||
dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
|
||||
assert_allclose(dbc_dya, dbc_dya_an)
|
||||
assert_allclose(dbc_dyb, dbc_dyb_an)
|
||||
assert_allclose(dbc_dp, dbc_dp_an)
|
||||
|
||||
ya = np.array([0.5, 100])
|
||||
yb = np.array([-1000, 10.5])
|
||||
p = np.array([])
|
||||
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
|
||||
lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
|
||||
dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
|
||||
assert_allclose(dbc_dya, dbc_dya_an)
|
||||
assert_allclose(dbc_dyb, dbc_dyb_an)
|
||||
assert_(dbc_dp is None)
|
||||
|
||||
|
||||
def test_compute_jac_indices():
|
||||
n = 2
|
||||
m = 4
|
||||
k = 2
|
||||
i, j = compute_jac_indices(n, m, k)
|
||||
s = coo_matrix((np.ones_like(i), (i, j))).toarray()
|
||||
s_true = np.array([
|
||||
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
|
||||
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
|
||||
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
|
||||
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
|
||||
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
|
||||
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
|
||||
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
|
||||
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
|
||||
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
|
||||
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
|
||||
])
|
||||
assert_array_equal(s, s_true)
|
||||
|
||||
|
||||
def test_compute_global_jac():
|
||||
n = 2
|
||||
m = 5
|
||||
k = 1
|
||||
i_jac, j_jac = compute_jac_indices(2, 5, 1)
|
||||
x = np.linspace(0, 1, 5)
|
||||
h = np.diff(x)
|
||||
y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
|
||||
p = np.array([3.0])
|
||||
|
||||
f = sl_fun(x, y, p)
|
||||
|
||||
x_middle = x[:-1] + 0.5 * h
|
||||
y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
|
||||
|
||||
df_dy, df_dp = sl_fun_jac(x, y, p)
|
||||
df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
|
||||
dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
|
||||
|
||||
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
|
||||
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
|
||||
J = J.toarray()
|
||||
|
||||
def J_block(h, p):
|
||||
return np.array([
|
||||
[h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
|
||||
[0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
|
||||
])
|
||||
|
||||
J_true = np.zeros((m * n + k, m * n + k))
|
||||
for i in range(m - 1):
|
||||
J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0])
|
||||
|
||||
J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
|
||||
J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
|
||||
h**2/6 * (y[1, :-1] - y[1, 1:]))
|
||||
|
||||
J_true[8, 0] = 1
|
||||
J_true[9, 8] = 1
|
||||
J_true[10, 1] = 1
|
||||
J_true[10, 10] = -1
|
||||
|
||||
assert_allclose(J, J_true, rtol=1e-10)
|
||||
|
||||
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
|
||||
df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
|
||||
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
|
||||
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
|
||||
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
|
||||
J = J.toarray()
|
||||
assert_allclose(J, J_true, rtol=2e-8, atol=2e-8)
|
||||
|
||||
|
||||
def test_parameter_validation():
|
||||
x = [0, 1, 0.5]
|
||||
y = np.zeros((2, 3))
|
||||
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
|
||||
|
||||
x = np.linspace(0, 1, 5)
|
||||
y = np.zeros((2, 4))
|
||||
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
|
||||
|
||||
def fun(x, y, p):
|
||||
return exp_fun(x, y)
|
||||
def bc(ya, yb, p):
|
||||
return exp_bc(ya, yb)
|
||||
|
||||
y = np.zeros((2, x.shape[0]))
|
||||
assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
|
||||
|
||||
def wrong_shape_fun(x, y):
|
||||
return np.zeros(3)
|
||||
|
||||
assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
|
||||
|
||||
S = np.array([[0, 0]])
|
||||
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
|
||||
|
||||
|
||||
def test_no_params():
|
||||
x = np.linspace(0, 1, 5)
|
||||
x_test = np.linspace(0, 1, 100)
|
||||
y = np.zeros((2, x.shape[0]))
|
||||
for fun_jac in [None, exp_fun_jac]:
|
||||
for bc_jac in [None, exp_bc_jac]:
|
||||
sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
|
||||
bc_jac=bc_jac)
|
||||
|
||||
assert_equal(sol.status, 0)
|
||||
assert_(sol.success)
|
||||
|
||||
assert_equal(sol.x.size, 5)
|
||||
|
||||
sol_test = sol.sol(x_test)
|
||||
|
||||
assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
|
||||
|
||||
f_test = exp_fun(x_test, sol_test)
|
||||
r = sol.sol(x_test, 1) - f_test
|
||||
rel_res = r / (1 + np.abs(f_test))
|
||||
norm_res = np.sum(rel_res**2, axis=0)**0.5
|
||||
assert_(np.all(norm_res < 1e-3))
|
||||
|
||||
assert_(np.all(sol.rms_residuals < 1e-3))
|
||||
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
|
||||
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_with_params():
|
||||
x = np.linspace(0, np.pi, 5)
|
||||
x_test = np.linspace(0, np.pi, 100)
|
||||
y = np.ones((2, x.shape[0]))
|
||||
|
||||
for fun_jac in [None, sl_fun_jac]:
|
||||
for bc_jac in [None, sl_bc_jac]:
|
||||
sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
|
||||
bc_jac=bc_jac)
|
||||
|
||||
assert_equal(sol.status, 0)
|
||||
assert_(sol.success)
|
||||
|
||||
assert_(sol.x.size < 10)
|
||||
|
||||
assert_allclose(sol.p, [1], rtol=1e-4)
|
||||
|
||||
sol_test = sol.sol(x_test)
|
||||
|
||||
assert_allclose(sol_test[0], sl_sol(x_test, [1]),
|
||||
rtol=1e-4, atol=1e-4)
|
||||
|
||||
f_test = sl_fun(x_test, sol_test, [1])
|
||||
r = sol.sol(x_test, 1) - f_test
|
||||
rel_res = r / (1 + np.abs(f_test))
|
||||
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
|
||||
assert_(np.all(norm_res < 1e-3))
|
||||
|
||||
assert_(np.all(sol.rms_residuals < 1e-3))
|
||||
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
|
||||
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_singular_term():
|
||||
x = np.linspace(0, 1, 10)
|
||||
x_test = np.linspace(0.05, 1, 100)
|
||||
y = np.empty((2, 10))
|
||||
y[0] = (3/4)**0.5
|
||||
y[1] = 1e-4
|
||||
S = np.array([[0, 0], [0, -2]])
|
||||
|
||||
for fun_jac in [None, emden_fun_jac]:
|
||||
for bc_jac in [None, emden_bc_jac]:
|
||||
sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
|
||||
bc_jac=bc_jac)
|
||||
|
||||
assert_equal(sol.status, 0)
|
||||
assert_(sol.success)
|
||||
|
||||
assert_equal(sol.x.size, 10)
|
||||
|
||||
sol_test = sol.sol(x_test)
|
||||
assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)
|
||||
|
||||
f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
|
||||
r = sol.sol(x_test, 1) - f_test
|
||||
rel_res = r / (1 + np.abs(f_test))
|
||||
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
|
||||
|
||||
assert_(np.all(norm_res < 1e-3))
|
||||
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
|
||||
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_complex():
|
||||
# The test is essentially the same as test_no_params, but boundary
|
||||
# conditions are turned into complex.
|
||||
x = np.linspace(0, 1, 5)
|
||||
x_test = np.linspace(0, 1, 100)
|
||||
y = np.zeros((2, x.shape[0]), dtype=complex)
|
||||
for fun_jac in [None, exp_fun_jac]:
|
||||
for bc_jac in [None, exp_bc_jac]:
|
||||
sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
|
||||
bc_jac=bc_jac)
|
||||
|
||||
assert_equal(sol.status, 0)
|
||||
assert_(sol.success)
|
||||
|
||||
sol_test = sol.sol(x_test)
|
||||
|
||||
assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
|
||||
assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)
|
||||
|
||||
f_test = exp_fun(x_test, sol_test)
|
||||
r = sol.sol(x_test, 1) - f_test
|
||||
rel_res = r / (1 + np.abs(f_test))
|
||||
norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
|
||||
axis=0) ** 0.5
|
||||
assert_(np.all(norm_res < 1e-3))
|
||||
|
||||
assert_(np.all(sol.rms_residuals < 1e-3))
|
||||
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
|
||||
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_failures():
    """solve_bvp reports status 1 (node budget exhausted) and status 2
    (singular collocation system)."""
    # max_nodes too small to reach the requested tolerance
    x = np.linspace(0, 1, 2)
    y = np.zeros((2, x.size))
    res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
    assert_equal(res.status, 1)
    assert_(not res.success)

    # degenerate problem whose collocation Jacobian is singular
    x = np.linspace(0, 1, 5)
    y = np.zeros((2, x.size))
    res = solve_bvp(undefined_fun, undefined_bc, x, y)
    assert_equal(res.status, 2)
    assert_(not res.success)
|
||||
|
||||
|
||||
def test_big_problem():
|
||||
n = 30
|
||||
x = np.linspace(0, 1, 5)
|
||||
y = np.zeros((2 * n, x.size))
|
||||
sol = solve_bvp(big_fun, big_bc, x, y)
|
||||
|
||||
assert_equal(sol.status, 0)
|
||||
assert_(sol.success)
|
||||
|
||||
sol_test = sol.sol(x)
|
||||
|
||||
assert_allclose(sol_test[0], big_sol(x, n))
|
||||
|
||||
f_test = big_fun(x, sol_test)
|
||||
r = sol.sol(x, 1) - f_test
|
||||
rel_res = r / (1 + np.abs(f_test))
|
||||
norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
|
||||
assert_(np.all(norm_res < 1e-3))
|
||||
|
||||
assert_(np.all(sol.rms_residuals < 1e-3))
|
||||
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
|
||||
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_big_problem_with_parameters():
    """Solve a large BVP with unknown parameters, with and without
    analytic Jacobians for the ODE right-hand side and the boundary
    conditions."""
    n = 30
    x = np.linspace(0, np.pi, 5)
    x_test = np.linspace(0, np.pi, 100)
    y = np.ones((2 * n, x.size))

    # Exercise all four combinations of supplied/estimated Jacobians.
    for fun_jac in [None, big_fun_with_parameters_jac]:
        for bc_jac in [None, big_bc_with_parameters_jac]:
            sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x,
                            y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac)

            assert_equal(sol.status, 0)
            assert_(sol.success)

            # Parameters start at 0.5 and must converge to 1.
            assert_allclose(sol.p, [1, 1], rtol=1e-4)

            sol_test = sol.sol(x_test)

            # Components repeat in blocks of 4; check a representative set.
            for isol in range(0, n, 4):
                assert_allclose(sol_test[isol],
                                big_sol_with_parameters(x_test, [1, 1])[0],
                                rtol=1e-4, atol=1e-4)
                assert_allclose(sol_test[isol + 2],
                                big_sol_with_parameters(x_test, [1, 1])[1],
                                rtol=1e-4, atol=1e-4)

            # Relative residual of the interpolant against the RHS.
            f_test = big_fun_with_parameters(x_test, sol_test, [1, 1])
            r = sol.sol(x_test, 1) - f_test
            rel_res = r / (1 + np.abs(f_test))
            norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
            assert_(np.all(norm_res < 1e-3))

            assert_(np.all(sol.rms_residuals < 1e-3))
            # Interpolant must reproduce the collocation values.
            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_shock_layer():
    """Boundary-layer problem: checks adaptive mesh refinement stays bounded."""
    x = np.linspace(-1, 1, 5)
    x_test = np.linspace(-1, 1, 100)
    y = np.zeros((2, x.size))
    sol = solve_bvp(shock_fun, shock_bc, x, y)

    assert_equal(sol.status, 0)
    assert_(sol.success)

    # The refined mesh should stay moderate despite the sharp layer.
    assert_(sol.x.size < 110)

    sol_test = sol.sol(x_test)
    assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)

    # Relative residual of the interpolant's derivative against the RHS.
    f_test = shock_fun(x_test, sol_test)
    r = sol.sol(x_test, 1) - f_test
    rel_res = r / (1 + np.abs(f_test))
    norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5

    assert_(np.all(norm_res < 1e-3))
    # Interpolant must reproduce the collocation values exactly.
    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_nonlin_bc():
    """BVP with nonlinear boundary conditions; mesh should stay small."""
    x = np.linspace(0, 0.1, 5)
    x_test = x
    y = np.zeros([2, x.size])
    sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y)

    assert_equal(sol.status, 0)
    assert_(sol.success)

    # The initial 5-node mesh should need little or no refinement.
    assert_(sol.x.size < 8)

    sol_test = sol.sol(x_test)
    assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5)

    # Relative residual of the interpolant's derivative against the RHS.
    f_test = nonlin_bc_fun(x_test, sol_test)
    r = sol.sol(x_test, 1) - f_test
    rel_res = r / (1 + np.abs(f_test))
    norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5

    assert_(np.all(norm_res < 1e-3))
    # Interpolant must reproduce the collocation values exactly.
    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
|
||||
|
||||
|
||||
def test_verbose():
    # Smoke test: each verbose level prints what it promises, and none crash.
    mesh = np.linspace(0, 1, 5)
    guess = np.zeros((2, mesh.shape[0]))
    for level in [0, 1, 2]:
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            sol = solve_bvp(exp_fun, exp_bc, mesh, guess, verbose=level)
            captured = sys.stdout.getvalue()
        finally:
            # Always restore stdout, even if the solver raises.
            sys.stdout = saved_stdout

        assert_(sol.success)
        # Level 0 is silent; level >= 1 reports the outcome;
        # level >= 2 additionally reports residual details.
        if level == 0:
            assert_(not captured, captured)
        if level >= 1:
            assert_("Solved in" in captured, captured)
        if level >= 2:
            assert_("Max residual" in captured, captured)
|
||||
@ -0,0 +1,834 @@
|
||||
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
|
||||
"""
|
||||
Tests for numerical integration.
|
||||
"""
|
||||
import numpy as np
|
||||
from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
|
||||
allclose)
|
||||
|
||||
from numpy.testing import (
|
||||
assert_, assert_array_almost_equal,
|
||||
assert_allclose, assert_array_equal, assert_equal, assert_warns)
|
||||
from pytest import raises as assert_raises
|
||||
from scipy.integrate import odeint, ode, complex_ode
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Test ODE integrators
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestOdeint:
    # Check integrate.odeint against every non-complex problem in PROBLEMS,
    # covering the plain call, tfirst=True, and the Dfun (Jacobian) option.

    def _do_problem(self, problem):
        t = arange(0.0, problem.stop_t, 0.05)

        # Basic case
        z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
        assert_(problem.verify(z, t))

        # Use tfirst=True: callback signature becomes f(t, y) instead
        # of f(y, t).
        z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
                             full_output=True, tfirst=True)
        assert_(problem.verify(z, t))

        if hasattr(problem, 'jac'):
            # Use Dfun
            z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
                                 full_output=True)
            assert_(problem.verify(z, t))

            # Use Dfun and tfirst=True: the Jacobian callback is swapped too.
            z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
                                 Dfun=lambda t, y: problem.jac(y, t),
                                 full_output=True, tfirst=True)
            assert_(problem.verify(z, t))

    def test_odeint(self):
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            # odeint is real-valued only; skip complex problems.
            if problem.cmplx:
                continue
            self._do_problem(problem)
|
||||
|
||||
|
||||
class TestODEClass:
    # Shared driver for the `ode` and `complex_ode` wrapper classes.

    ode_class = None  # Set in subclass.

    def _do_problem(self, problem, integrator, method='adams'):
        """Integrate ``problem`` with the named integrator and verify it."""

        # ode has callback arguments in different order than odeint
        def f(t, z):
            return problem.f(z, t)
        jac = None
        if hasattr(problem, 'jac'):
            def jac(t, z):
                return problem.jac(z, t)

        # Pass band widths only when the problem declares a banded Jacobian.
        integrator_params = {}
        if problem.lband is not None or problem.uband is not None:
            integrator_params['uband'] = problem.uband
            integrator_params['lband'] = problem.lband

        ig = self.ode_class(f, jac)
        # Tighter tolerances than the verification thresholds so that
        # integration error does not dominate the comparison.
        ig.set_integrator(integrator,
                          atol=problem.atol/10,
                          rtol=problem.rtol/10,
                          method=method,
                          **integrator_params)

        ig.set_initial_value(problem.z0, t=0.0)
        z = ig.integrate(problem.stop_t)

        # integrate() must return the same array the object exposes as .y.
        assert_array_equal(z, ig.y)
        assert_(ig.successful(), (problem, method))
        assert_(ig.get_return_code() > 0, (problem, method))
        assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
|
||||
|
||||
|
||||
class TestOde(TestODEClass):
    # Tests of the real-valued `ode` wrapper over each Fortran backend.

    ode_class = ode

    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            # Adams (non-stiff) only where appropriate; BDF always works.
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            self._do_problem(problem, 'vode', 'bdf')

    def test_zvode(self):
        # Check the zvode solver (complex-capable; no cmplx skip needed).
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if not problem.stiff:
                self._do_problem(problem, 'zvode', 'adams')
            self._do_problem(problem, 'zvode', 'bdf')

    def test_lsoda(self):
        # Check the lsoda solver (switches stiff/non-stiff automatically).
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            self._do_problem(problem, 'lsoda')

    def test_dopri5(self):
        # Check the dopri5 solver (explicit RK: no stiff problems, no
        # Jacobian support).
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')

    def test_dop853(self):
        # Check the dop853 solver (explicit RK: same restrictions as dopri5).
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')

    def test_concurrent_fail(self):
        # These backends keep global Fortran state: interleaving two
        # instances must raise rather than silently corrupt results.
        for sol in ('vode', 'zvode', 'lsoda'):
            def f(t, y):
                return 1.0

            r = ode(f).set_integrator(sol)
            r.set_initial_value(0, 0)

            r2 = ode(f).set_integrator(sol)
            r2.set_initial_value(0, 0)

            r.integrate(r.t + 0.1)
            r2.integrate(r2.t + 0.1)

            # Going back to r after r2 has run must fail.
            assert_raises(RuntimeError, r.integrate, r.t + 0.1)

    def test_concurrent_ok(self):
        # Interleavings that ARE supported must produce correct results
        # (dy/dt = 1, so y equals total integrated time).
        def f(t, y):
            return 1.0

        for k in range(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            # dopri5/dop853 have no global state; arbitrary interleaving
            # is allowed.
            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
|
||||
|
||||
|
||||
class TestComplexOde(TestODEClass):
    # Tests of the `complex_ode` wrapper, which adapts real-valued backends
    # to complex systems; no cmplx skips are needed here.

    ode_class = complex_ode

    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            # Adams for non-stiff problems, BDF for stiff ones.
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            else:
                self._do_problem(problem, 'vode', 'bdf')

    def test_lsoda(self):
        # Check the lsoda solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            self._do_problem(problem, 'lsoda')

    def test_dopri5(self):
        # Check the dopri5 solver (explicit RK: skip stiff problems and
        # problems that declare a Jacobian, which dopri5 cannot use).
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')

    def test_dop853(self):
        # Check the dop853 solver (same restrictions as dopri5).
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')
|
||||
|
||||
|
||||
class TestSolout:
    # Check integrate.ode correctly handles solout for dopri5 and dop853

    def _run_solout_test(self, integrator):
        # Check correct usage of solout: it must be called at every
        # accepted step, starting at t0 and ending at tend.
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]

        def solout(t, y):
            # Record each accepted step; copy because y is reused in place.
            ts.append(t)
            ys.append(y.copy())

        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]

        ig = ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)

    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)

    def _run_solout_after_initial_test(self, integrator):
        # Check if solout works even if it is set after the initial value.
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]

        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())

        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]

        ig = ode(rhs).set_integrator(integrator)
        # Note the reversed call order relative to _run_solout_test.
        ig.set_initial_value(y0, t0)
        ig.set_solout(solout)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)

    def test_solout_after_initial(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_after_initial_test(integrator)

    def _run_solout_break_test(self, integrator):
        # Check correct usage of stopping via solout: returning -1 from
        # the callback must terminate the integration early.
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]

        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
            if t > tend/2.0:
                return -1

        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]

        ig = ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        # Must have stopped strictly between tend/2 and tend.
        assert_(ts[-1] > tend/2.0)
        assert_(ts[-1] < tend)

    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
|
||||
|
||||
|
||||
class TestComplexSolout:
    # Check integrate.ode correctly handles solout for dopri5 and dop853
    # when driven through complex_ode (complex-valued RHS).

    def _run_solout_test(self, integrator):
        # Check correct usage of solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 20.0
        y0 = [0.0]

        def solout(t, y):
            # Record each accepted step; copy because y is reused in place.
            ts.append(t)
            ys.append(y.copy())

        def rhs(t, y):
            # Complex pole near t=10 keeps the trajectory genuinely complex.
            return [1.0/(t - 10.0 - 1j)]

        ig = complex_ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)

    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)

    def _run_solout_break_test(self, integrator):
        # Check correct usage of stopping via solout: returning -1 from
        # the callback must terminate the integration early.
        ts = []
        ys = []
        t0 = 0.0
        tend = 20.0
        y0 = [0.0]

        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
            if t > tend/2.0:
                return -1

        def rhs(t, y):
            return [1.0/(t - 10.0 - 1j)]

        ig = complex_ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        # Must have stopped strictly between tend/2 and tend.
        assert_(ts[-1] > tend/2.0)
        assert_(ts[-1] < tend)

    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Test problems
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ODE:
    """Base class for the ODE test problems below.

    Subclasses override the class attributes and supply ``f(z, t)``
    (the right-hand side), optionally ``jac(z, t)``, and
    ``verify(zs, t)`` which compares a computed trajectory against
    the analytic solution.
    """

    # Problem characteristics.
    stiff = False   # whether the problem needs a stiff integrator
    cmplx = False   # whether the state is complex-valued
    stop_t = 1      # integration end time
    z0 = []         # initial condition

    # Jacobian band widths (None means a full Jacobian).
    lband = None
    uband = None

    # Tolerances used when integrating and verifying.
    atol = 1e-6
    rtol = 1e-5
|
||||
|
||||
|
||||
class SimpleOscillator(ODE):
    r"""
    Free vibration of a simple oscillator::
        m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0
    Solution::
        u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
    """
    stop_t = 1 + 0.09
    z0 = array([1.0, 0.1], float)  # [u(0), u'(0)]

    k = 4.0  # spring constant
    m = 1.0  # mass

    def f(self, z, t):
        # First-order form: z = [u, u'], z' = A z with A = [[0, 1], [-k/m, 0]].
        tmp = zeros((2, 2), float)
        tmp[0, 1] = 1.0
        tmp[1, 0] = -self.k / self.m
        return dot(tmp, z)

    def verify(self, zs, t):
        # Compare only the displacement component against the closed form.
        omega = sqrt(self.k / self.m)
        u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
        return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
|
||||
|
||||
|
||||
class ComplexExp(ODE):
    r"""The equation :lm:`\dot u = i u`"""
    stop_t = 1.23*pi
    z0 = exp([1j, 2j, 3j, 4j, 5j])  # five points on the unit circle
    cmplx = True

    def f(self, z, t):
        return 1j*z

    def jac(self, z, t):
        # Constant diagonal Jacobian: d(i z)/dz = i I.
        return 1j*eye(5)

    def verify(self, zs, t):
        # Exact solution is a rotation of the initial state: z0 * exp(i t).
        u = self.z0 * exp(1j*t)
        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
|
||||
|
||||
|
||||
class Pi(ODE):
    r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
    stop_t = 20
    z0 = [0]
    cmplx = True

    def f(self, z, t):
        # Integrand shifted so the interval [0, 20] maps to [-10, 10].
        return array([1./(t - 10 + 1j)])

    def verify(self, zs, t):
        # Closed form of the integral over the full interval.
        u = -2j * np.arctan(10)
        return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
|
||||
|
||||
|
||||
class CoupledDecay(ODE):
    r"""
    3 coupled decays suited for banded treatment
    (banded mode makes it necessary when N>>3)
    """

    stiff = True
    stop_t = 0.5
    z0 = [5.0, 7.0, 13.0]
    # Lower bandwidth 1, upper bandwidth 0: the Jacobian is lower bidiagonal.
    lband = 1
    uband = 0

    lmbd = [0.17, 0.23, 0.29]  # fictitious decay constants

    def f(self, z, t):
        # Each species decays and feeds the next one in the chain.
        lmbd = self.lmbd
        return np.array([-lmbd[0]*z[0],
                         -lmbd[1]*z[1] + lmbd[0]*z[0],
                         -lmbd[2]*z[2] + lmbd[1]*z[1]])

    def jac(self, z, t):
        # The full Jacobian is
        #
        #    [-lmbd[0]      0         0   ]
        #    [ lmbd[0]  -lmbd[1]      0   ]
        #    [    0      lmbd[1]  -lmbd[2]]
        #
        # The lower and upper bandwidths are lband=1 and uband=0, resp.
        # The representation of this array in packed format is
        #
        #    [-lmbd[0]  -lmbd[1]  -lmbd[2]]
        #    [ lmbd[0]   lmbd[1]      0   ]

        lmbd = self.lmbd
        j = np.zeros((self.lband + self.uband + 1, 3), order='F')

        def set_j(ri, ci, val):
            # Map full-matrix indices (ri, ci) to packed banded storage.
            j[self.uband + ri - ci, ci] = val
        set_j(0, 0, -lmbd[0])
        set_j(1, 0, lmbd[0])
        set_j(1, 1, -lmbd[1])
        set_j(2, 1, lmbd[1])
        set_j(2, 2, -lmbd[2])
        return j

    def verify(self, zs, t):
        # Formulae derived by hand (Bateman-type solution of the chain).
        lmbd = np.array(self.lmbd)
        d10 = lmbd[1] - lmbd[0]
        d21 = lmbd[2] - lmbd[1]
        d20 = lmbd[2] - lmbd[0]
        e0 = np.exp(-lmbd[0] * t)
        e1 = np.exp(-lmbd[1] * t)
        e2 = np.exp(-lmbd[2] * t)
        u = np.vstack((
            self.z0[0] * e0,
            self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
            self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
            lmbd[1] * lmbd[0] * self.z0[0] / d10 *
            (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
|
||||
|
||||
|
||||
# All ODE test problems exercised by the solver test classes above.
PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
def f(t, x):
    """RHS of the harmonic oscillator x'' = -x as a first-order system."""
    return [x[1], -x[0]]
|
||||
|
||||
|
||||
def jac(t, x):
    """Constant Jacobian of ``f``: the 2x2 rotation generator."""
    return array([[0.0, 1.0],
                  [-1.0, 0.0]])
|
||||
|
||||
|
||||
def f1(t, x, omega):
    """Oscillator RHS scaled by a single frequency parameter."""
    return [omega * x[1], -omega * x[0]]
|
||||
|
||||
|
||||
def jac1(t, x, omega):
    """Jacobian of ``f1``."""
    return array([[0.0, omega],
                  [-omega, 0.0]])
|
||||
|
||||
|
||||
def f2(t, x, omega1, omega2):
    """Oscillator RHS with independent frequencies for each component."""
    return [omega1 * x[1], -omega2 * x[0]]
|
||||
|
||||
|
||||
def jac2(t, x, omega1, omega2):
    """Jacobian of ``f2``."""
    return array([[0.0, omega1],
                  [-omega2, 0.0]])
|
||||
|
||||
|
||||
def fv(t, x, omega):
    """Oscillator RHS taking the two frequencies as one vector parameter."""
    return [omega[0] * x[1], -omega[1] * x[0]]
|
||||
|
||||
|
||||
def jacv(t, x, omega):
    """Jacobian of ``fv``."""
    return array([[0.0, omega[0]],
                  [-omega[1], 0.0]])
|
||||
|
||||
|
||||
class ODECheckParameterUse:
    """Call an ode-class solver with several cases of parameter use."""

    # solver_name must be set before tests can be run with this class.

    # Set these in subclasses.
    solver_name = ''
    solver_uses_jac = False

    def _get_solver(self, f, jac):
        """Build an `ode` instance configured for this subclass's solver."""
        solver = ode(f, jac)
        if self.solver_uses_jac:
            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
                                  with_jacobian=self.solver_uses_jac)
        else:
            # XXX Shouldn't set_integrator *always* accept the keyword arg
            # 'with_jacobian', and perhaps raise an exception if it is set
            # to True if the solver can't actually use it?
            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
        return solver

    def _check_solver(self, solver):
        # Integrate the unit oscillator over [0, pi]; the exact endpoint
        # state is [-1, 0].
        ic = [1.0, 0.0]
        solver.set_initial_value(ic, 0.0)
        solver.integrate(pi)
        assert_array_almost_equal(solver.y, [-1.0, 0.0])

    def test_no_params(self):
        solver = self._get_solver(f, jac)
        self._check_solver(solver)

    def test_one_scalar_param(self):
        solver = self._get_solver(f1, jac1)
        omega = 1.0
        solver.set_f_params(omega)
        if self.solver_uses_jac:
            solver.set_jac_params(omega)
        self._check_solver(solver)

    def test_two_scalar_params(self):
        solver = self._get_solver(f2, jac2)
        omega1 = 1.0
        omega2 = 1.0
        solver.set_f_params(omega1, omega2)
        if self.solver_uses_jac:
            solver.set_jac_params(omega1, omega2)
        self._check_solver(solver)

    def test_vector_param(self):
        solver = self._get_solver(fv, jacv)
        omega = [1.0, 1.0]
        solver.set_f_params(omega)
        if self.solver_uses_jac:
            solver.set_jac_params(omega)
        self._check_solver(solver)

    def test_warns_on_failure(self):
        # Set nsteps small to ensure failure
        solver = self._get_solver(f, jac)
        solver.set_integrator(self.solver_name, nsteps=1)
        ic = [1.0, 0.0]
        solver.set_initial_value(ic, 0.0)
        assert_warns(UserWarning, solver.integrate, pi)
|
||||
|
||||
|
||||
class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
    # dopri5 is an explicit RK method: it cannot use a Jacobian.
    solver_name = 'dopri5'
    solver_uses_jac = False
|
||||
|
||||
|
||||
class TestDOP853CheckParameterUse(ODECheckParameterUse):
    # dop853 is an explicit RK method: it cannot use a Jacobian.
    solver_name = 'dop853'
    solver_uses_jac = False
|
||||
|
||||
|
||||
class TestVODECheckParameterUse(ODECheckParameterUse):
    # vode supports a user-supplied Jacobian.
    solver_name = 'vode'
    solver_uses_jac = True
|
||||
|
||||
|
||||
class TestZVODECheckParameterUse(ODECheckParameterUse):
    # zvode supports a user-supplied Jacobian.
    solver_name = 'zvode'
    solver_uses_jac = True
|
||||
|
||||
|
||||
class TestLSODACheckParameterUse(ODECheckParameterUse):
    # lsoda supports a user-supplied Jacobian.
    solver_name = 'lsoda'
    solver_uses_jac = True
|
||||
|
||||
|
||||
def test_odeint_trivial_time():
    """odeint must succeed on a single time point with full_output=True.

    Regression test for gh-4282.
    """
    initial = 1
    times = [0]
    y, info = odeint(lambda y, t: -y, initial, times, full_output=True)
    # With only t=0 requested, the result is just the initial condition.
    assert_array_equal(y, np.array([[initial]]))
|
||||
|
||||
|
||||
def test_odeint_banded_jacobian():
    # Test the use of the `Dfun`, `ml` and `mu` options of odeint.

    def func(y, t, c):
        # Linear system y' = c @ y.
        return c.dot(y)

    def jac(y, t, c):
        # Full Jacobian (constant for a linear system).
        return c

    def jac_transpose(y, t, c):
        # Column-major variant for col_deriv=True.
        return c.T.copy(order='C')

    def bjac_rows(y, t, c):
        # Pack c into LSODA's banded (ml=2, mu=1) row format.
        jac = np.vstack((np.r_[0, np.diag(c, 1)],
                         np.diag(c),
                         np.r_[np.diag(c, -1), 0],
                         np.r_[np.diag(c, -2), 0, 0]))
        return jac

    def bjac_cols(y, t, c):
        # Transposed banded packing for col_deriv=True.
        return bjac_rows(y, t, c).T.copy(order='C')

    # Banded system matrix: two sub-diagonals and one super-diagonal.
    c = array([[-205, 0.01, 0.00, 0.0],
               [0.1, -2.50, 0.02, 0.0],
               [1e-3, 0.01, -2.0, 0.01],
               [0.00, 0.00, 0.1, -1.0]])

    y0 = np.ones(4)
    t = np.array([0, 5, 10, 100])

    # Use the full Jacobian.
    sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=jac)

    # Use the transposed full Jacobian, with col_deriv=True.
    sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=jac_transpose, col_deriv=True)

    # Use the banded Jacobian.
    sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=bjac_rows, ml=2, mu=1)

    # Use the transposed banded Jacobian, with col_deriv=True.
    sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)

    # All four formulations must give the same trajectory.
    assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
    assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
    assert_allclose(sol3, sol4, err_msg="sol3 != sol4")

    # Verify that the number of jacobian evaluations was the same for the
    # calls of odeint with a full jacobian and with a banded jacobian. This is
    # a regression test--there was a bug in the handling of banded jacobians
    # that resulted in an incorrect jacobian matrix being passed to the LSODA
    # code. That would cause errors or excessive jacobian evaluations.
    assert_array_equal(info1['nje'], info2['nje'])
    assert_array_equal(info3['nje'], info4['nje'])

    # Test the use of tfirst
    sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
                             full_output=True, atol=1e-13, rtol=1e-11,
                             mxstep=10000,
                             Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
    # The code should execute the exact same sequence of floating point
    # calculations, so these should be exactly equal. We'll be safe and use
    # a small tolerance.
    assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
|
||||
|
||||
|
||||
def test_odeint_errors():
    """Exceptions raised inside user callbacks must propagate out of odeint."""

    def sys1d(x, t):
        return -100*x

    def bad1(x, t):
        # Raises ZeroDivisionError when called.
        return 1.0/0

    def bad2(x, t):
        # Returns a non-numeric value.
        return "foo"

    def bad_jac1(x, t):
        return 1.0/0

    def bad_jac2(x, t):
        return [["foo"]]

    def sys2d(x, t):
        return [-100*x[0], -0.1*x[1]]

    def sys2d_bad_jac(x, t):
        return [[1.0/0, 0], [0, -0.1]]

    # Failures in the RHS callback.
    assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
    assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])

    # Failures in the Jacobian callback (1-D system).
    assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
    assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)

    # Failure in the Jacobian callback (2-D system).
    assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
                  Dfun=sys2d_bad_jac)
|
||||
|
||||
|
||||
def test_odeint_bad_shapes():
    # Tests of some errors that can occur with odeint.

    def badrhs(x, t):
        return [1, -1]

    def sys1(x, t):
        return -100*x

    def badjac(x, t):
        return [[0, 0, 0]]

    # y0 must be at most 1-d.
    bad_y0 = [[0, 0], [0, 0]]
    assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])

    # t must be at most 1-d.
    bad_t = [[0, 1], [2, 3]]
    assert_raises(ValueError, odeint, sys1, [10.0], bad_t)

    # y0 is 10, but badrhs(x, t) returns [1, -1].
    assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])

    # shape of array returned by badjac(x, t) is not correct.
    assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)
|
||||
|
||||
|
||||
def test_repeated_t_values():
    """Regression test for gh-8217."""

    def func(x, t):
        # Simple decay with half-life 4*log(2) / log(2) ... rate 0.25.
        return -0.25*x

    # All-zero time array: the solution never leaves the initial state.
    t = np.zeros(10)
    sol = odeint(func, [1.], t)
    assert_array_equal(sol, np.ones((len(t), 1)))

    # tau is the half-life of exp(-t/4) decay: exp(-tau/4) == 1/2.
    tau = 4*np.log(2)
    # Repeated values mixed with genuine steps must be handled.
    t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]
    sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12)
    expected_sol = np.array([[1.0, 2.0]]*9 +
                            [[0.5, 1.0],
                             [0.25, 0.5],
                             [0.25, 0.5],
                             [0.125, 0.25]])
    assert_allclose(sol, expected_sol)

    # Edge case: empty t sequence.
    sol = odeint(func, [1.], [])
    assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))

    # t values are not monotonic.
    assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0])
    assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])
|
||||
@ -0,0 +1,74 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_equal, assert_allclose
|
||||
from scipy.integrate import odeint
|
||||
import scipy.integrate._test_odeint_banded as banded5x5
|
||||
|
||||
|
||||
def rhs(y, t):
    """odeint-style RHS wrapping the in-place Fortran banded5x5 routine."""
    out = np.zeros_like(y)
    banded5x5.banded5x5(t, y, out)
    return out
|
||||
|
||||
|
||||
def jac(y, t):
    """Full (n x n) Jacobian, filled in-place by the Fortran helper."""
    size = len(y)
    full = np.zeros((size, size), order='F')
    banded5x5.banded5x5_jac(t, y, 1, 1, full)
    return full
|
||||
|
||||
|
||||
def bjac(y, t):
    """Banded (4 x n) Jacobian, filled in-place by the Fortran helper."""
    packed = np.zeros((4, len(y)), order='F')
    banded5x5.banded5x5_bjac(t, y, 1, 1, packed)
    return packed
|
||||
|
||||
|
||||
# JACTYPE values understood by banded5x5_solve and check_odeint below.
JACTYPE_FULL = 1    # full (dense) Jacobian
JACTYPE_BANDED = 4  # banded Jacobian with ml=2, mu=1
|
||||
|
||||
|
||||
def check_odeint(jactype):
    """Compare odeint against the pure-Fortran banded5x5 reference solver.

    Both must produce (near-)identical trajectories and the same step,
    function-evaluation and Jacobian-evaluation counts.
    """
    if jactype == JACTYPE_FULL:
        ml = None
        mu = None
        jacobian = jac
    elif jactype == JACTYPE_BANDED:
        ml = 2
        mu = 1
        jacobian = bjac
    else:
        raise ValueError(f"invalid jactype: {jactype!r}")

    y0 = np.arange(1.0, 6.0)
    # These tolerances must match the tolerances used in banded5x5.f.
    rtol = 1e-11
    atol = 1e-13
    dt = 0.125
    nsteps = 64
    t = dt * np.arange(nsteps+1)

    sol, info = odeint(rhs, y0, t,
                       Dfun=jacobian, ml=ml, mu=mu,
                       atol=atol, rtol=rtol, full_output=True)
    yfinal = sol[-1]
    # Counters at the final time point.
    odeint_nst = info['nst'][-1]
    odeint_nfe = info['nfe'][-1]
    odeint_nje = info['nje'][-1]

    y1 = y0.copy()
    # Pure Fortran solution. y1 is modified in-place.
    nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)

    # It is likely that yfinal and y1 are *exactly* the same, but
    # we'll be cautious and use assert_allclose.
    assert_allclose(yfinal, y1, rtol=1e-12)
    assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
|
||||
|
||||
|
||||
def test_odeint_full_jac():
    # Exercise the odeint/LSODA comparison with a full (dense) Jacobian.
    check_odeint(JACTYPE_FULL)
|
||||
|
||||
|
||||
def test_odeint_banded_jac():
    # Exercise the odeint/LSODA comparison with a banded Jacobian.
    check_odeint(JACTYPE_BANDED)
|
||||
@ -0,0 +1,680 @@
|
||||
import sys
|
||||
import math
|
||||
import numpy as np
|
||||
from numpy import sqrt, cos, sin, arctan, exp, log, pi
|
||||
from numpy.testing import (assert_,
|
||||
assert_allclose, assert_array_less, assert_almost_equal)
|
||||
import pytest
|
||||
|
||||
from scipy.integrate import quad, dblquad, tplquad, nquad
|
||||
from scipy.special import erf, erfc
|
||||
from scipy._lib._ccallback import LowLevelCallable
|
||||
|
||||
import ctypes
|
||||
import ctypes.util
|
||||
from scipy._lib._ccallback_c import sine_ctypes
|
||||
|
||||
import scipy.integrate._test_multivariate as clib_test
|
||||
|
||||
|
||||
def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8):
    """Check a quadrature ``(value, error)`` pair against a reference value.

    The estimate must agree with the reference within its own reported
    error; the reported error itself must be below ``error_tolerance``
    (pass ``None`` to skip the second check).
    """
    estimate, reported_err = value_and_err
    assert_allclose(estimate, tabled_value, atol=reported_err, rtol=0)
    if error_tolerance is not None:
        assert_array_less(reported_err, error_tolerance)
|
||||
|
||||
|
||||
def get_clib_test_routine(name, restype, *argtypes):
    """Look up a raw pointer attribute on the test extension module and
    cast it to a ctypes function with the given signature."""
    ptr = getattr(clib_test, name)
    return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes))
|
||||
|
||||
|
||||
class TestCtypesQuad:
    # quad() must accept plain ctypes functions and LowLevelCallables.

    def setup_method(self):
        # Locate the platform's C math library for use as a ctypes source.
        if sys.platform == 'win32':
            files = ['api-ms-win-crt-math-l1-1-0.dll']
        elif sys.platform == 'darwin':
            files = ['libm.dylib']
        else:
            files = ['libm.so', 'libm.so.6']

        for file in files:
            try:
                self.lib = ctypes.CDLL(file)
                break
            except OSError:
                pass
        else:
            # This test doesn't work on some Linux platforms (Fedora for
            # example) that put an ld script in libm.so - see gh-5370
            pytest.skip("Ctypes can't import libm.so")

        # Declare double(double) signatures for the functions we use.
        restype = ctypes.c_double
        argtypes = (ctypes.c_double,)
        for name in ['sin', 'cos', 'tan']:
            func = getattr(self.lib, name)
            func.restype = restype
            func.argtypes = argtypes

    def test_typical(self):
        # Integrating the C functions must match integrating the Python ones.
        assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
        assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
        assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])

    def test_ctypes_sine(self):
        # Smoke test: a LowLevelCallable built from ctypes is accepted.
        quad(LowLevelCallable(sine_ctypes), 0, 1)

    def test_ctypes_variants(self):
        # Build sine callables with every supported C signature variant.
        sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
                                      ctypes.c_double, ctypes.c_void_p)

        sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double),
                                      ctypes.c_void_p)

        sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
                                      ctypes.c_double)

        sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double))

        # Deliberately wrong signature: not accepted by LowLevelCallable.
        sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
                                      ctypes.c_int, ctypes.c_double)

        all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
        legacy_sigs = [sin_2, sin_4]
        legacy_only_sigs = [sin_4]

        # LowLevelCallables work for new signatures
        for j, func in enumerate(all_sigs):
            callback = LowLevelCallable(func)
            if func in legacy_only_sigs:
                pytest.raises(ValueError, quad, callback, 0, pi)
            else:
                # Integral of sin over [0, pi] is exactly 2.
                assert_allclose(quad(callback, 0, pi)[0], 2.0)

        # Plain ctypes items work only for legacy signatures
        for j, func in enumerate(legacy_sigs):
            if func in legacy_sigs:
                assert_allclose(quad(func, 0, pi)[0], 2.0)
            else:
                pytest.raises(ValueError, quad, func, 0, pi)
|
||||
|
||||
|
||||
class TestMultivariateCtypesQuad:
    # Integrate compiled multivariate helper routines (loaded via ctypes
    # from the test extension) against known analytic values.

    def setup_method(self):
        # All three helpers share the same double(int, double) prototype.
        restype = ctypes.c_double
        argtypes = (ctypes.c_int, ctypes.c_double)
        for name in ['_multivariate_typical', '_multivariate_indefinite',
                     '_multivariate_sin']:
            func = get_clib_test_routine(name, restype, *argtypes)
            setattr(self, name, func)

    def test_typical(self):
        # 1) Typical function with two extra arguments:
        assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
                    0.30614353532540296487)

    def test_indefinite(self):
        # 2) Infinite integration limits --- Euler's constant
        assert_quad(quad(self._multivariate_indefinite, 0, np.inf),
                    0.577215664901532860606512)

    def test_threadsafety(self):
        # Ensure multivariate ctypes are threadsafe
        def threadsafety(y):
            # Nested quad call exercises re-entrancy of the C callback.
            return y + quad(self._multivariate_sin, 0, 1)[0]
        assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
|
||||
|
||||
|
||||
class TestQuad:
|
||||
def test_typical(self):
|
||||
# 1) Typical function with two extra arguments:
|
||||
def myfunc(x, n, z): # Bessel function integrand
|
||||
return cos(n*x-z*sin(x))/pi
|
||||
assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
|
||||
|
||||
def test_indefinite(self):
|
||||
# 2) Infinite integration limits --- Euler's constant
|
||||
def myfunc(x): # Euler's constant integrand
|
||||
return -exp(-x)*log(x)
|
||||
assert_quad(quad(myfunc, 0, np.inf), 0.577215664901532860606512)
|
||||
|
||||
def test_singular(self):
|
||||
# 3) Singular points in region of integration.
|
||||
def myfunc(x):
|
||||
if 0 < x < 2.5:
|
||||
return sin(x)
|
||||
elif 2.5 <= x <= 5.0:
|
||||
return exp(-x)
|
||||
else:
|
||||
return 0.0
|
||||
|
||||
assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
|
||||
1 - cos(2.5) + exp(-2.5) - exp(-5.0))
|
||||
|
||||
def test_sine_weighted_finite(self):
|
||||
# 4) Sine weighted integral (finite limits)
|
||||
def myfunc(x, a):
|
||||
return exp(a*(x-1))
|
||||
|
||||
ome = 2.0**3.4
|
||||
assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
|
||||
(20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
|
||||
|
||||
def test_sine_weighted_infinite(self):
|
||||
# 5) Sine weighted integral (infinite limits)
|
||||
def myfunc(x, a):
|
||||
return exp(-x*a)
|
||||
|
||||
a = 4.0
|
||||
ome = 3.0
|
||||
assert_quad(quad(myfunc, 0, np.inf, args=a, weight='sin', wvar=ome),
|
||||
ome/(a**2 + ome**2))
|
||||
|
||||
def test_cosine_weighted_infinite(self):
|
||||
# 6) Cosine weighted integral (negative infinite limits)
|
||||
def myfunc(x, a):
|
||||
return exp(x*a)
|
||||
|
||||
a = 2.5
|
||||
ome = 2.3
|
||||
assert_quad(quad(myfunc, -np.inf, 0, args=a, weight='cos', wvar=ome),
|
||||
a/(a**2 + ome**2))
|
||||
|
||||
def test_algebraic_log_weight(self):
|
||||
# 6) Algebraic-logarithmic weight.
|
||||
def myfunc(x, a):
|
||||
return 1/(1+x+2**(-a))
|
||||
|
||||
a = 1.5
|
||||
assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
|
||||
wvar=(-0.5, -0.5)),
|
||||
pi/sqrt((1+2**(-a))**2 - 1))
|
||||
|
||||
def test_cauchypv_weight(self):
|
||||
# 7) Cauchy prinicpal value weighting w(x) = 1/(x-c)
|
||||
def myfunc(x, a):
|
||||
return 2.0**(-a)/((x-1)**2+4.0**(-a))
|
||||
|
||||
a = 0.4
|
||||
tabledValue = ((2.0**(-0.4)*log(1.5) -
|
||||
2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
|
||||
arctan(2.0**(a+2)) -
|
||||
arctan(2.0**a)) /
|
||||
(4.0**(-a) + 1))
|
||||
assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
|
||||
tabledValue, error_tolerance=1.9e-8)
|
||||
|
||||
def test_b_less_than_a(self):
|
||||
def f(x, p, q):
|
||||
return p * np.exp(-q*x)
|
||||
|
||||
val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
|
||||
val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
|
||||
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
|
||||
|
||||
def test_b_less_than_a_2(self):
|
||||
def f(x, s):
|
||||
return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
|
||||
|
||||
val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
|
||||
val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
|
||||
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
|
||||
|
||||
def test_b_less_than_a_3(self):
|
||||
def f(x):
|
||||
return 1.0
|
||||
|
||||
val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
|
||||
val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
|
||||
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
|
||||
|
||||
def test_b_less_than_a_full_output(self):
|
||||
def f(x):
|
||||
return 1.0
|
||||
|
||||
res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
|
||||
res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
|
||||
err = max(res_1[1], res_2[1])
|
||||
assert_allclose(res_1[0], -res_2[0], atol=err)
|
||||
|
||||
def test_double_integral(self):
|
||||
# 8) Double Integral test
|
||||
def simpfunc(y, x): # Note order of arguments.
|
||||
return x+y
|
||||
|
||||
a, b = 1.0, 2.0
|
||||
assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
|
||||
5/6.0 * (b**3.0-a**3.0))
|
||||
|
||||
def test_double_integral2(self):
|
||||
def func(x0, x1, t0, t1):
|
||||
return x0 + x1 + t0 + t1
|
||||
def g(x):
|
||||
return x
|
||||
def h(x):
|
||||
return 2 * x
|
||||
args = 1, 2
|
||||
assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
|
||||
|
||||
def test_double_integral3(self):
|
||||
def func(x0, x1):
|
||||
return x0 + x1 + 1 + 2
|
||||
assert_quad(dblquad(func, 1, 2, 1, 2),6.)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"x_lower, x_upper, y_lower, y_upper, expected",
|
||||
[
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 0] for all n.
|
||||
(-np.inf, 0, -np.inf, 0, np.pi / 4),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-inf, -1] for each n (one at a time).
|
||||
(-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)),
|
||||
(-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-inf, -1] for all n.
|
||||
(-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 1] for each n (one at a time).
|
||||
(-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)),
|
||||
(-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 1] for all n.
|
||||
(-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain Dx = [-inf, -1] and Dy = [-inf, 1].
|
||||
(-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain Dx = [-inf, 1] and Dy = [-inf, -1].
|
||||
(-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [0, inf] for all n.
|
||||
(0, np.inf, 0, np.inf, np.pi / 4),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [1, inf] for each n (one at a time).
|
||||
(1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)),
|
||||
(0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [1, inf] for all n.
|
||||
(1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-1, inf] for each n (one at a time).
|
||||
(-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)),
|
||||
(0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-1, inf] for all n.
|
||||
(-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain Dx = [-1, inf] and Dy = [1, inf].
|
||||
(-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain Dx = [1, inf] and Dy = [-1, inf].
|
||||
(1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
|
||||
# Multiple integration of a function in n = 2 variables: f(x, y, z)
|
||||
# over domain D = [-inf, inf] for all n.
|
||||
(-np.inf, np.inf, -np.inf, np.inf, np.pi)
|
||||
]
|
||||
)
|
||||
def test_double_integral_improper(
|
||||
self, x_lower, x_upper, y_lower, y_upper, expected
|
||||
):
|
||||
# The Gaussian Integral.
|
||||
def f(x, y):
|
||||
return np.exp(-x ** 2 - y ** 2)
|
||||
|
||||
assert_quad(
|
||||
dblquad(f, x_lower, x_upper, y_lower, y_upper),
|
||||
expected,
|
||||
error_tolerance=3e-8
|
||||
)
|
||||
|
||||
def test_triple_integral(self):
|
||||
# 9) Triple Integral test
|
||||
def simpfunc(z, y, x, t): # Note order of arguments.
|
||||
return (x+y+z)*t
|
||||
|
||||
a, b = 1.0, 2.0
|
||||
assert_quad(tplquad(simpfunc, a, b,
|
||||
lambda x: x, lambda x: 2*x,
|
||||
lambda x, y: x - y, lambda x, y: x + y,
|
||||
(2.,)),
|
||||
2*8/3.0 * (b**4.0 - a**4.0))
|
||||
|
||||
@pytest.mark.xslow
|
||||
@pytest.mark.parametrize(
|
||||
"x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected",
|
||||
[
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 0] for all n.
|
||||
(-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, -1] for each n (one at a time).
|
||||
(-np.inf, -1, -np.inf, 0, -np.inf, 0,
|
||||
(np.pi ** (3 / 2)) / 8 * erfc(1)),
|
||||
(-np.inf, 0, -np.inf, -1, -np.inf, 0,
|
||||
(np.pi ** (3 / 2)) / 8 * erfc(1)),
|
||||
(-np.inf, 0, -np.inf, 0, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * erfc(1)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, -1] for each n (two at a time).
|
||||
(-np.inf, -1, -np.inf, -1, -np.inf, 0,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
|
||||
(-np.inf, -1, -np.inf, 0, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
|
||||
(-np.inf, 0, -np.inf, -1, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, -1] for all n.
|
||||
(-np.inf, -1, -np.inf, -1, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1].
|
||||
(-np.inf, -1, -np.inf, 1, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1].
|
||||
(-np.inf, -1, -np.inf, -1, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1].
|
||||
(-np.inf, -1, -np.inf, 1, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1].
|
||||
(-np.inf, 1, -np.inf, -1, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1].
|
||||
(-np.inf, 1, -np.inf, 1, -np.inf, -1,
|
||||
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1].
|
||||
(-np.inf, 1, -np.inf, -1, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 1] for each n (one at a time).
|
||||
(-np.inf, 1, -np.inf, 0, -np.inf, 0,
|
||||
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
|
||||
(-np.inf, 0, -np.inf, 1, -np.inf, 0,
|
||||
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
|
||||
(-np.inf, 0, -np.inf, 0, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 1] for each n (two at a time).
|
||||
(-np.inf, 1, -np.inf, 1, -np.inf, 0,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
|
||||
(-np.inf, 1, -np.inf, 0, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
|
||||
(-np.inf, 0, -np.inf, 1, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, 1] for all n.
|
||||
(-np.inf, 1, -np.inf, 1, -np.inf, 1,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [0, inf] for all n.
|
||||
(0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [1, inf] for each n (one at a time).
|
||||
(1, np.inf, 0, np.inf, 0, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * erfc(1)),
|
||||
(0, np.inf, 1, np.inf, 0, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * erfc(1)),
|
||||
(0, np.inf, 0, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * erfc(1)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [1, inf] for each n (two at a time).
|
||||
(1, np.inf, 1, np.inf, 0, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
|
||||
(1, np.inf, 0, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
|
||||
(0, np.inf, 1, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [1, inf] for all n.
|
||||
(1, np.inf, 1, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-1, inf] for each n (one at a time).
|
||||
(-1, np.inf, 0, np.inf, 0, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
|
||||
(0, np.inf, -1, np.inf, 0, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
|
||||
(0, np.inf, 0, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-1, inf] for each n (two at a time).
|
||||
(-1, np.inf, -1, np.inf, 0, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
|
||||
(-1, np.inf, 0, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
|
||||
(0, np.inf, -1, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-1, inf] for all n.
|
||||
(-1, np.inf, -1, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = [1, inf] and Dy = Dz = [-1, inf].
|
||||
(1, np.inf, -1, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dy = [1, inf] and Dz = [-1, inf].
|
||||
(1, np.inf, 1, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dz = [1, inf] and Dy = [-1, inf].
|
||||
(1, np.inf, -1, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = [-1, inf] and Dy = Dz = [1, inf].
|
||||
(-1, np.inf, 1, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dy = [-1, inf] and Dz = [1, inf].
|
||||
(-1, np.inf, -1, np.inf, 1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain Dx = Dz = [-1, inf] and Dy = [1, inf].
|
||||
(-1, np.inf, 1, np.inf, -1, np.inf,
|
||||
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
|
||||
# Multiple integration of a function in n = 3 variables: f(x, y, z)
|
||||
# over domain D = [-inf, inf] for all n.
|
||||
(-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf,
|
||||
np.pi ** (3 / 2)),
|
||||
],
|
||||
)
|
||||
def test_triple_integral_improper(
|
||||
self,
|
||||
x_lower,
|
||||
x_upper,
|
||||
y_lower,
|
||||
y_upper,
|
||||
z_lower,
|
||||
z_upper,
|
||||
expected
|
||||
):
|
||||
# The Gaussian Integral.
|
||||
def f(x, y, z):
|
||||
return np.exp(-x ** 2 - y ** 2 - z ** 2)
|
||||
|
||||
assert_quad(
|
||||
tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper),
|
||||
expected,
|
||||
error_tolerance=6e-8
|
||||
)
|
||||
|
||||
    def test_complex(self):
        # Complex-valued integrand handled via ``complex_func=True``.
        def tfunc(x):
            return np.exp(1j*x)

        # integral_0^{pi/2} e^{ix} dx = 1 + 1j
        assert np.allclose(
                    quad(tfunc, 0, np.pi/2, complex_func=True)[0],
                    1+1j)

        # We consider a divergent case in order to force quadpack
        # to return an error message. The output is compared
        # against what is returned by explicit integration
        # of the parts.
        kwargs = {'a': 0, 'b': np.inf, 'full_output': True,
                  'weight': 'cos', 'wvar': 1}
        res_c = quad(tfunc, complex_func=True, **kwargs)
        res_r = quad(lambda x: np.real(np.exp(1j*x)),
                     complex_func=False,
                     **kwargs)
        res_i = quad(lambda x: np.imag(np.exp(1j*x)),
                     complex_func=False,
                     **kwargs)

        # Value and error estimate recombine as real + 1j*imag.
        np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0])
        np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1])

        # The complex full_output stores the real part's extra outputs
        # (three of them here: info dict, message, explanation) under
        # the 'real' key, in the same order quad returned them.
        assert len(res_c[2]['real']) == len(res_r[2:]) == 3
        assert res_c[2]['real'][2] == res_r[4]
        assert res_c[2]['real'][1] == res_r[3]
        assert res_c[2]['real'][0]['lst'] == res_r[2]['lst']

        # The imaginary part produced only the info dict (one extra
        # output), stored under the 'imag' key.
        assert len(res_c[2]['imag']) == len(res_i[2:]) == 1
        assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst']
|
||||
|
||||
|
||||
class TestNQuad:
|
||||
@pytest.mark.fail_slow(2)
|
||||
def test_fixed_limits(self):
|
||||
def func1(x0, x1, x2, x3):
|
||||
val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
|
||||
(1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
|
||||
return val
|
||||
|
||||
def opts_basic(*args):
|
||||
return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
|
||||
|
||||
res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
|
||||
opts=[opts_basic, {}, {}, {}], full_output=True)
|
||||
assert_quad(res[:-1], 1.5267454070738635)
|
||||
assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
|
||||
|
||||
@pytest.mark.fail_slow(2)
|
||||
def test_variable_limits(self):
|
||||
scale = .1
|
||||
|
||||
def func2(x0, x1, x2, x3, t0, t1):
|
||||
val = (x0*x1*x3**2 + np.sin(x2) + 1 +
|
||||
(1 if x0 + t1*x1 - t0 > 0 else 0))
|
||||
return val
|
||||
|
||||
def lim0(x1, x2, x3, t0, t1):
|
||||
return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
|
||||
scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
|
||||
|
||||
def lim1(x2, x3, t0, t1):
|
||||
return [scale * (t0*x2 + t1*x3) - 1,
|
||||
scale * (t0*x2 + t1*x3) + 1]
|
||||
|
||||
def lim2(x3, t0, t1):
|
||||
return [scale * (x3 + t0**2*t1**3) - 1,
|
||||
scale * (x3 + t0**2*t1**3) + 1]
|
||||
|
||||
def lim3(t0, t1):
|
||||
return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
|
||||
|
||||
def opts0(x1, x2, x3, t0, t1):
|
||||
return {'points': [t0 - t1*x1]}
|
||||
|
||||
def opts1(x2, x3, t0, t1):
|
||||
return {}
|
||||
|
||||
def opts2(x3, t0, t1):
|
||||
return {}
|
||||
|
||||
def opts3(t0, t1):
|
||||
return {}
|
||||
|
||||
res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
|
||||
opts=[opts0, opts1, opts2, opts3])
|
||||
assert_quad(res, 25.066666666666663)
|
||||
|
||||
def test_square_separate_ranges_and_opts(self):
|
||||
def f(y, x):
|
||||
return 1.0
|
||||
|
||||
assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
|
||||
|
||||
def test_square_aliased_ranges_and_opts(self):
|
||||
def f(y, x):
|
||||
return 1.0
|
||||
|
||||
r = [-1, 1]
|
||||
opt = {}
|
||||
assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
|
||||
|
||||
def test_square_separate_fn_ranges_and_opts(self):
|
||||
def f(y, x):
|
||||
return 1.0
|
||||
|
||||
def fn_range0(*args):
|
||||
return (-1, 1)
|
||||
|
||||
def fn_range1(*args):
|
||||
return (-1, 1)
|
||||
|
||||
def fn_opt0(*args):
|
||||
return {}
|
||||
|
||||
def fn_opt1(*args):
|
||||
return {}
|
||||
|
||||
ranges = [fn_range0, fn_range1]
|
||||
opts = [fn_opt0, fn_opt1]
|
||||
assert_quad(nquad(f, ranges, opts=opts), 4.0)
|
||||
|
||||
def test_square_aliased_fn_ranges_and_opts(self):
|
||||
def f(y, x):
|
||||
return 1.0
|
||||
|
||||
def fn_range(*args):
|
||||
return (-1, 1)
|
||||
|
||||
def fn_opt(*args):
|
||||
return {}
|
||||
|
||||
ranges = [fn_range, fn_range]
|
||||
opts = [fn_opt, fn_opt]
|
||||
assert_quad(nquad(f, ranges, opts=opts), 4.0)
|
||||
|
||||
def test_matching_quad(self):
|
||||
def func(x):
|
||||
return x**2 + 1
|
||||
|
||||
res, reserr = quad(func, 0, 4)
|
||||
res2, reserr2 = nquad(func, ranges=[[0, 4]])
|
||||
assert_almost_equal(res, res2)
|
||||
assert_almost_equal(reserr, reserr2)
|
||||
|
||||
def test_matching_dblquad(self):
|
||||
def func2d(x0, x1):
|
||||
return x0**2 + x1**3 - x0 * x1 + 1
|
||||
|
||||
res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
|
||||
res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
|
||||
assert_almost_equal(res, res2)
|
||||
assert_almost_equal(reserr, reserr2)
|
||||
|
||||
def test_matching_tplquad(self):
|
||||
def func3d(x0, x1, x2, c0, c1):
|
||||
return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
|
||||
|
||||
res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
|
||||
lambda x, y: -np.pi, lambda x, y: np.pi,
|
||||
args=(2, 3))
|
||||
res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
|
||||
assert_almost_equal(res, res2)
|
||||
|
||||
def test_dict_as_opts(self):
|
||||
try:
|
||||
nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
|
||||
except TypeError:
|
||||
assert False
|
||||
|
||||
@ -0,0 +1,721 @@
|
||||
# mypy: disable-error-code="attr-defined"
|
||||
import pytest
|
||||
import numpy as np
|
||||
from numpy import cos, sin, pi
|
||||
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
|
||||
assert_, suppress_warnings)
|
||||
from hypothesis import given
|
||||
import hypothesis.strategies as st
|
||||
import hypothesis.extra.numpy as hyp_num
|
||||
|
||||
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
|
||||
cumulative_trapezoid, trapezoid,
|
||||
quad, simpson, fixed_quad, AccuracyWarning,
|
||||
qmc_quad, cumulative_simpson)
|
||||
from scipy.integrate._quadrature import _cumulative_simpson_unequal_intervals
|
||||
from scipy import stats, special
|
||||
|
||||
|
||||
class TestFixedQuad:
    """Fixed-order Gaussian quadrature: an n-point rule is exact for
    polynomials of degree <= 2n - 1."""

    def test_scalar(self):
        # Scalar integrand: x**(2n-1) over [0, 1] integrates to 1/(2n),
        # and the rule reproduces it exactly (up to roundoff).
        order = 4
        result, _ = fixed_quad(lambda x: x**(2*order - 1), 0, 1, n=order)
        assert_allclose(result, 1/(2*order), rtol=1e-12)

    def test_vector(self):
        # Vector-valued integrand: one component per monomial degree
        # 1 .. 2n-1; each integrates exactly to 1/(degree + 1).
        order = 4
        degrees = np.arange(1, 2*order)
        result, _ = fixed_quad(lambda x: x**degrees[:, None], 0, 1, n=order)
        assert_allclose(result, 1/(degrees + 1), rtol=1e-12)
|
||||
|
||||
|
||||
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
|
||||
class TestQuadrature:
|
||||
def quad(self, x, a, b, args):
|
||||
raise NotImplementedError
|
||||
|
||||
def test_quadrature(self):
|
||||
# Typical function with two extra arguments:
|
||||
def myfunc(x, n, z): # Bessel function integrand
|
||||
return cos(n*x-z*sin(x))/pi
|
||||
val, err = quadrature(myfunc, 0, pi, (2, 1.8))
|
||||
table_val = 0.30614353532540296487
|
||||
assert_almost_equal(val, table_val, decimal=7)
|
||||
|
||||
def test_quadrature_rtol(self):
|
||||
def myfunc(x, n, z): # Bessel function integrand
|
||||
return 1e90 * cos(n*x-z*sin(x))/pi
|
||||
val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
|
||||
table_val = 1e90 * 0.30614353532540296487
|
||||
assert_allclose(val, table_val, rtol=1e-10)
|
||||
|
||||
def test_quadrature_miniter(self):
|
||||
# Typical function with two extra arguments:
|
||||
def myfunc(x, n, z): # Bessel function integrand
|
||||
return cos(n*x-z*sin(x))/pi
|
||||
table_val = 0.30614353532540296487
|
||||
for miniter in [5, 52]:
|
||||
val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
|
||||
assert_almost_equal(val, table_val, decimal=7)
|
||||
assert_(err < 1.0)
|
||||
|
||||
def test_quadrature_single_args(self):
|
||||
def myfunc(x, n):
|
||||
return 1e90 * cos(n*x-1.8*sin(x))/pi
|
||||
val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
|
||||
table_val = 1e90 * 0.30614353532540296487
|
||||
assert_allclose(val, table_val, rtol=1e-10)
|
||||
|
||||
def test_romberg(self):
|
||||
# Typical function with two extra arguments:
|
||||
def myfunc(x, n, z): # Bessel function integrand
|
||||
return cos(n*x-z*sin(x))/pi
|
||||
val = romberg(myfunc, 0, pi, args=(2, 1.8))
|
||||
table_val = 0.30614353532540296487
|
||||
assert_almost_equal(val, table_val, decimal=7)
|
||||
|
||||
def test_romberg_rtol(self):
|
||||
# Typical function with two extra arguments:
|
||||
def myfunc(x, n, z): # Bessel function integrand
|
||||
return 1e19*cos(n*x-z*sin(x))/pi
|
||||
val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
|
||||
table_val = 1e19*0.30614353532540296487
|
||||
assert_allclose(val, table_val, rtol=1e-10)
|
||||
|
||||
def test_romb(self):
|
||||
assert_equal(romb(np.arange(17)), 128)
|
||||
|
||||
def test_romb_gh_3731(self):
|
||||
# Check that romb makes maximal use of data points
|
||||
x = np.arange(2**4+1)
|
||||
y = np.cos(0.2*x)
|
||||
val = romb(y)
|
||||
val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
|
||||
assert_allclose(val, val2, rtol=1e-8, atol=0)
|
||||
|
||||
# should be equal to romb with 2**k+1 samples
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(AccuracyWarning, "divmax .4. exceeded")
|
||||
val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
|
||||
assert_allclose(val, val3, rtol=1e-12, atol=0)
|
||||
|
||||
def test_non_dtype(self):
|
||||
# Check that we work fine with functions returning float
|
||||
import math
|
||||
valmath = romberg(math.sin, 0, 1)
|
||||
expected_val = 0.45969769413185085
|
||||
assert_almost_equal(valmath, expected_val, decimal=7)
|
||||
|
||||
def test_newton_cotes(self):
|
||||
"""Test the first few degrees, for evenly spaced points."""
|
||||
n = 1
|
||||
wts, errcoff = newton_cotes(n, 1)
|
||||
assert_equal(wts, n*np.array([0.5, 0.5]))
|
||||
assert_almost_equal(errcoff, -n**3/12.0)
|
||||
|
||||
n = 2
|
||||
wts, errcoff = newton_cotes(n, 1)
|
||||
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
|
||||
assert_almost_equal(errcoff, -n**5/2880.0)
|
||||
|
||||
n = 3
|
||||
wts, errcoff = newton_cotes(n, 1)
|
||||
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
|
||||
assert_almost_equal(errcoff, -n**5/6480.0)
|
||||
|
||||
n = 4
|
||||
wts, errcoff = newton_cotes(n, 1)
|
||||
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
|
||||
assert_almost_equal(errcoff, -n**7/1935360.0)
|
||||
|
||||
def test_newton_cotes2(self):
|
||||
"""Test newton_cotes with points that are not evenly spaced."""
|
||||
|
||||
x = np.array([0.0, 1.5, 2.0])
|
||||
y = x**2
|
||||
wts, errcoff = newton_cotes(x)
|
||||
exact_integral = 8.0/3
|
||||
numeric_integral = np.dot(wts, y)
|
||||
assert_almost_equal(numeric_integral, exact_integral)
|
||||
|
||||
x = np.array([0.0, 1.4, 2.1, 3.0])
|
||||
y = x**2
|
||||
wts, errcoff = newton_cotes(x)
|
||||
exact_integral = 9.0
|
||||
numeric_integral = np.dot(wts, y)
|
||||
assert_almost_equal(numeric_integral, exact_integral)
|
||||
|
||||
def test_simpson(self):
|
||||
y = np.arange(17)
|
||||
assert_equal(simpson(y), 128)
|
||||
assert_equal(simpson(y, dx=0.5), 64)
|
||||
assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32)
|
||||
|
||||
# integral should be exactly 21
|
||||
x = np.linspace(1, 4, 4)
|
||||
def f(x):
|
||||
return x**2
|
||||
|
||||
assert_allclose(simpson(f(x), x=x), 21.0)
|
||||
|
||||
# integral should be exactly 114
|
||||
x = np.linspace(1, 7, 4)
|
||||
assert_allclose(simpson(f(x), dx=2.0), 114)
|
||||
|
||||
# test multi-axis behaviour
|
||||
a = np.arange(16).reshape(4, 4)
|
||||
x = np.arange(64.).reshape(4, 4, 4)
|
||||
y = f(x)
|
||||
for i in range(3):
|
||||
r = simpson(y, x=x, axis=i)
|
||||
it = np.nditer(a, flags=['multi_index'])
|
||||
for _ in it:
|
||||
idx = list(it.multi_index)
|
||||
idx.insert(i, slice(None))
|
||||
integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3
|
||||
assert_allclose(r[it.multi_index], integral)
|
||||
|
||||
# test when integration axis only has two points
|
||||
x = np.arange(16).reshape(8, 2)
|
||||
y = f(x)
|
||||
r = simpson(y, x=x, axis=-1)
|
||||
|
||||
integral = 0.5 * (y[:, 1] + y[:, 0]) * (x[:, 1] - x[:, 0])
|
||||
assert_allclose(r, integral)
|
||||
|
||||
# odd points, test multi-axis behaviour
|
||||
a = np.arange(25).reshape(5, 5)
|
||||
x = np.arange(125).reshape(5, 5, 5)
|
||||
y = f(x)
|
||||
for i in range(3):
|
||||
r = simpson(y, x=x, axis=i)
|
||||
it = np.nditer(a, flags=['multi_index'])
|
||||
for _ in it:
|
||||
idx = list(it.multi_index)
|
||||
idx.insert(i, slice(None))
|
||||
integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3
|
||||
assert_allclose(r[it.multi_index], integral)
|
||||
|
||||
# Tests for checking base case
|
||||
x = np.array([3])
|
||||
y = np.power(x, 2)
|
||||
assert_allclose(simpson(y, x=x, axis=0), 0.0)
|
||||
assert_allclose(simpson(y, x=x, axis=-1), 0.0)
|
||||
|
||||
x = np.array([3, 3, 3, 3])
|
||||
y = np.power(x, 2)
|
||||
assert_allclose(simpson(y, x=x, axis=0), 0.0)
|
||||
assert_allclose(simpson(y, x=x, axis=-1), 0.0)
|
||||
|
||||
x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]])
|
||||
y = np.power(x, 2)
|
||||
zero_axis = [0.0, 0.0, 0.0, 0.0]
|
||||
default_axis = [170 + 1/3] * 3 # 8**3 / 3 - 1/3
|
||||
assert_allclose(simpson(y, x=x, axis=0), zero_axis)
|
||||
# the following should be exact
|
||||
assert_allclose(simpson(y, x=x, axis=-1), default_axis)
|
||||
|
||||
x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]])
|
||||
y = np.power(x, 2)
|
||||
zero_axis = [0.0, 136.0, 1088.0, 8704.0]
|
||||
default_axis = [170 + 1/3, 170 + 1/3, 32**3 / 3 - 1/3]
|
||||
assert_allclose(simpson(y, x=x, axis=0), zero_axis)
|
||||
assert_allclose(simpson(y, x=x, axis=-1), default_axis)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('droplast', [False, True])
|
||||
def test_simpson_2d_integer_no_x(self, droplast):
|
||||
# The inputs are 2d integer arrays. The results should be
|
||||
# identical to the results when the inputs are floating point.
|
||||
y = np.array([[2, 2, 4, 4, 8, 8, -4, 5],
|
||||
[4, 4, 2, -4, 10, 22, -2, 10]])
|
||||
if droplast:
|
||||
y = y[:, :-1]
|
||||
result = simpson(y, axis=-1)
|
||||
expected = simpson(np.array(y, dtype=np.float64), axis=-1)
|
||||
assert_equal(result, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('func', [romberg, quadrature])
|
||||
def test_deprecate_integrator(func):
|
||||
message = f"`scipy.integrate.{func.__name__}` is deprecated..."
|
||||
with pytest.deprecated_call(match=message):
|
||||
func(np.exp, 0, 1)
|
||||
|
||||
|
||||
class TestCumulative_trapezoid:
|
||||
def test_1d(self):
|
||||
x = np.linspace(-2, 2, num=5)
|
||||
y = x
|
||||
y_int = cumulative_trapezoid(y, x, initial=0)
|
||||
y_expected = [0., -1.5, -2., -1.5, 0.]
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
y_int = cumulative_trapezoid(y, x, initial=None)
|
||||
assert_allclose(y_int, y_expected[1:])
|
||||
|
||||
def test_y_nd_x_nd(self):
|
||||
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
|
||||
y = x
|
||||
y_int = cumulative_trapezoid(y, x, initial=0)
|
||||
y_expected = np.array([[[0., 0.5, 2., 4.5],
|
||||
[0., 4.5, 10., 16.5]],
|
||||
[[0., 8.5, 18., 28.5],
|
||||
[0., 12.5, 26., 40.5]],
|
||||
[[0., 16.5, 34., 52.5],
|
||||
[0., 20.5, 42., 64.5]]])
|
||||
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
# Try with all axes
|
||||
shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
|
||||
for axis, shape in zip([0, 1, 2], shapes):
|
||||
y_int = cumulative_trapezoid(y, x, initial=0, axis=axis)
|
||||
assert_equal(y_int.shape, (3, 2, 4))
|
||||
y_int = cumulative_trapezoid(y, x, initial=None, axis=axis)
|
||||
assert_equal(y_int.shape, shape)
|
||||
|
||||
def test_y_nd_x_1d(self):
|
||||
y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
|
||||
x = np.arange(4)**2
|
||||
# Try with all axes
|
||||
ys_expected = (
|
||||
np.array([[[4., 5., 6., 7.],
|
||||
[8., 9., 10., 11.]],
|
||||
[[40., 44., 48., 52.],
|
||||
[56., 60., 64., 68.]]]),
|
||||
np.array([[[2., 3., 4., 5.]],
|
||||
[[10., 11., 12., 13.]],
|
||||
[[18., 19., 20., 21.]]]),
|
||||
np.array([[[0.5, 5., 17.5],
|
||||
[4.5, 21., 53.5]],
|
||||
[[8.5, 37., 89.5],
|
||||
[12.5, 53., 125.5]],
|
||||
[[16.5, 69., 161.5],
|
||||
[20.5, 85., 197.5]]]))
|
||||
|
||||
for axis, y_expected in zip([0, 1, 2], ys_expected):
|
||||
y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis,
|
||||
initial=None)
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
def test_x_none(self):
|
||||
y = np.linspace(-2, 2, num=5)
|
||||
|
||||
y_int = cumulative_trapezoid(y)
|
||||
y_expected = [-1.5, -2., -1.5, 0.]
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
y_int = cumulative_trapezoid(y, initial=0)
|
||||
y_expected = [0, -1.5, -2., -1.5, 0.]
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
y_int = cumulative_trapezoid(y, dx=3)
|
||||
y_expected = [-4.5, -6., -4.5, 0.]
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
y_int = cumulative_trapezoid(y, dx=3, initial=0)
|
||||
y_expected = [0, -4.5, -6., -4.5, 0.]
|
||||
assert_allclose(y_int, y_expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"initial", [1, 0.5]
|
||||
)
|
||||
def test_initial_warning(self, initial):
|
||||
"""If initial is not None or 0, a ValueError is raised."""
|
||||
y = np.linspace(0, 10, num=10)
|
||||
with pytest.deprecated_call(match="`initial`"):
|
||||
res = cumulative_trapezoid(y, initial=initial)
|
||||
assert_allclose(res, [initial, *np.cumsum(y[1:] + y[:-1])/2])
|
||||
|
||||
def test_zero_len_y(self):
|
||||
with pytest.raises(ValueError, match="At least one point is required"):
|
||||
cumulative_trapezoid(y=[])
|
||||
|
||||
|
||||
class TestTrapezoid:
|
||||
def test_simple(self):
|
||||
x = np.arange(-10, 10, .1)
|
||||
r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
|
||||
# check integral of normal equals 1
|
||||
assert_allclose(r, 1)
|
||||
|
||||
def test_ndim(self):
|
||||
x = np.linspace(0, 1, 3)
|
||||
y = np.linspace(0, 2, 8)
|
||||
z = np.linspace(0, 3, 13)
|
||||
|
||||
wx = np.ones_like(x) * (x[1] - x[0])
|
||||
wx[0] /= 2
|
||||
wx[-1] /= 2
|
||||
wy = np.ones_like(y) * (y[1] - y[0])
|
||||
wy[0] /= 2
|
||||
wy[-1] /= 2
|
||||
wz = np.ones_like(z) * (z[1] - z[0])
|
||||
wz[0] /= 2
|
||||
wz[-1] /= 2
|
||||
|
||||
q = x[:, None, None] + y[None,:, None] + z[None, None,:]
|
||||
|
||||
qx = (q * wx[:, None, None]).sum(axis=0)
|
||||
qy = (q * wy[None, :, None]).sum(axis=1)
|
||||
qz = (q * wz[None, None, :]).sum(axis=2)
|
||||
|
||||
# n-d `x`
|
||||
r = trapezoid(q, x=x[:, None, None], axis=0)
|
||||
assert_allclose(r, qx)
|
||||
r = trapezoid(q, x=y[None,:, None], axis=1)
|
||||
assert_allclose(r, qy)
|
||||
r = trapezoid(q, x=z[None, None,:], axis=2)
|
||||
assert_allclose(r, qz)
|
||||
|
||||
# 1-d `x`
|
||||
r = trapezoid(q, x=x, axis=0)
|
||||
assert_allclose(r, qx)
|
||||
r = trapezoid(q, x=y, axis=1)
|
||||
assert_allclose(r, qy)
|
||||
r = trapezoid(q, x=z, axis=2)
|
||||
assert_allclose(r, qz)
|
||||
|
||||
def test_masked(self):
|
||||
# Testing that masked arrays behave as if the function is 0 where
|
||||
# masked
|
||||
x = np.arange(5)
|
||||
y = x * x
|
||||
mask = x == 2
|
||||
ym = np.ma.array(y, mask=mask)
|
||||
r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
|
||||
assert_allclose(trapezoid(ym, x), r)
|
||||
|
||||
xm = np.ma.array(x, mask=mask)
|
||||
assert_allclose(trapezoid(ym, xm), r)
|
||||
|
||||
xm = np.ma.array(x, mask=mask)
|
||||
assert_allclose(trapezoid(y, xm), r)
|
||||
|
||||
|
||||
class TestQMCQuad:
|
||||
def test_input_validation(self):
|
||||
message = "`func` must be callable."
|
||||
with pytest.raises(TypeError, match=message):
|
||||
qmc_quad("a duck", [0, 0], [1, 1])
|
||||
|
||||
message = "`func` must evaluate the integrand at points..."
|
||||
with pytest.raises(ValueError, match=message):
|
||||
qmc_quad(lambda: 1, [0, 0], [1, 1])
|
||||
|
||||
def func(x):
|
||||
assert x.ndim == 1
|
||||
return np.sum(x)
|
||||
message = "Exception encountered when attempting vectorized call..."
|
||||
with pytest.warns(UserWarning, match=message):
|
||||
qmc_quad(func, [0, 0], [1, 1])
|
||||
|
||||
message = "`n_points` must be an integer."
|
||||
with pytest.raises(TypeError, match=message):
|
||||
qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5)
|
||||
|
||||
message = "`n_estimates` must be an integer."
|
||||
with pytest.raises(TypeError, match=message):
|
||||
qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5)
|
||||
|
||||
message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
|
||||
with pytest.raises(TypeError, match=message):
|
||||
qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck")
|
||||
|
||||
message = "`qrng` must be initialized with dimensionality equal to "
|
||||
with pytest.raises(ValueError, match=message):
|
||||
qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1))
|
||||
|
||||
message = r"`log` must be boolean \(`True` or `False`\)."
|
||||
with pytest.raises(TypeError, match=message):
|
||||
qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10)
|
||||
|
||||
def basic_test(self, n_points=2**8, n_estimates=8, signs=np.ones(2)):
|
||||
|
||||
ndim = 2
|
||||
mean = np.zeros(ndim)
|
||||
cov = np.eye(ndim)
|
||||
|
||||
def func(x):
|
||||
return stats.multivariate_normal.pdf(x.T, mean, cov)
|
||||
|
||||
rng = np.random.default_rng(2879434385674690281)
|
||||
qrng = stats.qmc.Sobol(ndim, seed=rng)
|
||||
a = np.zeros(ndim)
|
||||
b = np.ones(ndim) * signs
|
||||
res = qmc_quad(func, a, b, n_points=n_points,
|
||||
n_estimates=n_estimates, qrng=qrng)
|
||||
ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
|
||||
atol = special.stdtrit(n_estimates-1, 0.995) * res.standard_error # 99% CI
|
||||
assert_allclose(res.integral, ref, atol=atol)
|
||||
assert np.prod(signs)*res.integral > 0
|
||||
|
||||
rng = np.random.default_rng(2879434385674690281)
|
||||
qrng = stats.qmc.Sobol(ndim, seed=rng)
|
||||
logres = qmc_quad(lambda *args: np.log(func(*args)), a, b,
|
||||
n_points=n_points, n_estimates=n_estimates,
|
||||
log=True, qrng=qrng)
|
||||
assert_allclose(np.exp(logres.integral), res.integral, rtol=1e-14)
|
||||
assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0)
|
||||
assert_allclose(np.exp(logres.standard_error),
|
||||
res.standard_error, rtol=1e-14, atol=1e-16)
|
||||
|
||||
@pytest.mark.parametrize("n_points", [2**8, 2**12])
|
||||
@pytest.mark.parametrize("n_estimates", [8, 16])
|
||||
def test_basic(self, n_points, n_estimates):
|
||||
self.basic_test(n_points, n_estimates)
|
||||
|
||||
@pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]])
|
||||
def test_sign(self, signs):
|
||||
self.basic_test(signs=signs)
|
||||
|
||||
@pytest.mark.parametrize("log", [False, True])
|
||||
def test_zero(self, log):
|
||||
message = "A lower limit was equal to an upper limit, so"
|
||||
with pytest.warns(UserWarning, match=message):
|
||||
res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log)
|
||||
assert res.integral == (-np.inf if log else 0)
|
||||
assert res.standard_error == 0
|
||||
|
||||
def test_flexible_input(self):
|
||||
# check that qrng is not required
|
||||
# also checks that for 1d problems, a and b can be scalars
|
||||
def func(x):
|
||||
return stats.norm.pdf(x, scale=2)
|
||||
|
||||
res = qmc_quad(func, 0, 1)
|
||||
ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2)
|
||||
assert_allclose(res.integral, ref, 1e-2)
|
||||
|
||||
|
||||
def cumulative_simpson_nd_reference(y, *, x=None, dx=None, initial=None, axis=-1):
|
||||
# Use cumulative_trapezoid if length of y < 3
|
||||
if y.shape[axis] < 3:
|
||||
if initial is None:
|
||||
return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=None)
|
||||
else:
|
||||
return initial + cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=0)
|
||||
|
||||
# Ensure that working axis is last axis
|
||||
y = np.moveaxis(y, axis, -1)
|
||||
x = np.moveaxis(x, axis, -1) if np.ndim(x) > 1 else x
|
||||
dx = np.moveaxis(dx, axis, -1) if np.ndim(dx) > 1 else dx
|
||||
initial = np.moveaxis(initial, axis, -1) if np.ndim(initial) > 1 else initial
|
||||
|
||||
# If `x` is not present, create it from `dx`
|
||||
n = y.shape[-1]
|
||||
x = dx * np.arange(n) if dx is not None else x
|
||||
# Similarly, if `initial` is not present, set it to 0
|
||||
initial_was_none = initial is None
|
||||
initial = 0 if initial_was_none else initial
|
||||
|
||||
# `np.apply_along_axis` accepts only one array, so concatenate arguments
|
||||
x = np.broadcast_to(x, y.shape)
|
||||
initial = np.broadcast_to(initial, y.shape[:-1] + (1,))
|
||||
z = np.concatenate((y, x, initial), axis=-1)
|
||||
|
||||
# Use `np.apply_along_axis` to compute result
|
||||
def f(z):
|
||||
return cumulative_simpson(z[:n], x=z[n:2*n], initial=z[2*n:])
|
||||
res = np.apply_along_axis(f, -1, z)
|
||||
|
||||
# Remove `initial` and undo axis move as needed
|
||||
res = res[..., 1:] if initial_was_none else res
|
||||
res = np.moveaxis(res, -1, axis)
|
||||
return res
|
||||
|
||||
|
||||
class TestCumulativeSimpson:
|
||||
x0 = np.arange(4)
|
||||
y0 = x0**2
|
||||
|
||||
@pytest.mark.parametrize('use_dx', (False, True))
|
||||
@pytest.mark.parametrize('use_initial', (False, True))
|
||||
def test_1d(self, use_dx, use_initial):
|
||||
# Test for exact agreement with polynomial of highest
|
||||
# possible order (3 if `dx` is constant, 2 otherwise).
|
||||
rng = np.random.default_rng(82456839535679456794)
|
||||
n = 10
|
||||
|
||||
# Generate random polynomials and ground truth
|
||||
# integral of appropriate order
|
||||
order = 3 if use_dx else 2
|
||||
dx = rng.random()
|
||||
x = (np.sort(rng.random(n)) if order == 2
|
||||
else np.arange(n)*dx + rng.random())
|
||||
i = np.arange(order + 1)[:, np.newaxis]
|
||||
c = rng.random(order + 1)[:, np.newaxis]
|
||||
y = np.sum(c*x**i, axis=0)
|
||||
Y = np.sum(c*x**(i + 1)/(i + 1), axis=0)
|
||||
ref = Y if use_initial else (Y-Y[0])[1:]
|
||||
|
||||
# Integrate with `cumulative_simpson`
|
||||
initial = Y[0] if use_initial else None
|
||||
kwarg = {'dx': dx} if use_dx else {'x': x}
|
||||
res = cumulative_simpson(y, **kwarg, initial=initial)
|
||||
|
||||
# Compare result against reference
|
||||
if not use_dx:
|
||||
assert_allclose(res, ref, rtol=2e-15)
|
||||
else:
|
||||
i0 = 0 if use_initial else 1
|
||||
# all terms are "close"
|
||||
assert_allclose(res, ref, rtol=0.0025)
|
||||
# only even-interval terms are "exact"
|
||||
assert_allclose(res[i0::2], ref[i0::2], rtol=2e-15)
|
||||
|
||||
@pytest.mark.parametrize('axis', np.arange(-3, 3))
|
||||
@pytest.mark.parametrize('x_ndim', (1, 3))
|
||||
@pytest.mark.parametrize('x_len', (1, 2, 7))
|
||||
@pytest.mark.parametrize('i_ndim', (None, 0, 3,))
|
||||
@pytest.mark.parametrize('dx', (None, True))
|
||||
def test_nd(self, axis, x_ndim, x_len, i_ndim, dx):
|
||||
# Test behavior of `cumulative_simpson` with N-D `y`
|
||||
rng = np.random.default_rng(82456839535679456794)
|
||||
|
||||
# determine shapes
|
||||
shape = [5, 6, x_len]
|
||||
shape[axis], shape[-1] = shape[-1], shape[axis]
|
||||
shape_len_1 = shape.copy()
|
||||
shape_len_1[axis] = 1
|
||||
i_shape = shape_len_1 if i_ndim == 3 else ()
|
||||
|
||||
# initialize arguments
|
||||
y = rng.random(size=shape)
|
||||
x, dx = None, None
|
||||
if dx:
|
||||
dx = rng.random(size=shape_len_1) if x_ndim > 1 else rng.random()
|
||||
else:
|
||||
x = (np.sort(rng.random(size=shape), axis=axis) if x_ndim > 1
|
||||
else np.sort(rng.random(size=shape[axis])))
|
||||
initial = None if i_ndim is None else rng.random(size=i_shape)
|
||||
|
||||
# compare results
|
||||
res = cumulative_simpson(y, x=x, dx=dx, initial=initial, axis=axis)
|
||||
ref = cumulative_simpson_nd_reference(y, x=x, dx=dx, initial=initial, axis=axis)
|
||||
np.testing.assert_allclose(res, ref, rtol=1e-15)
|
||||
|
||||
@pytest.mark.parametrize(('message', 'kwarg_update'), [
|
||||
("x must be strictly increasing", dict(x=[2, 2, 3, 4])),
|
||||
("x must be strictly increasing", dict(x=[x0, [2, 2, 4, 8]], y=[y0, y0])),
|
||||
("x must be strictly increasing", dict(x=[x0, x0, x0], y=[y0, y0, y0], axis=0)),
|
||||
("At least one point is required", dict(x=[], y=[])),
|
||||
("`axis=4` is not valid for `y` with `y.ndim=1`", dict(axis=4)),
|
||||
("shape of `x` must be the same as `y` or 1-D", dict(x=np.arange(5))),
|
||||
("`initial` must either be a scalar or...", dict(initial=np.arange(5))),
|
||||
("`dx` must either be a scalar or...", dict(x=None, dx=np.arange(5))),
|
||||
])
|
||||
def test_simpson_exceptions(self, message, kwarg_update):
|
||||
kwargs0 = dict(y=self.y0, x=self.x0, dx=None, initial=None, axis=-1)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
cumulative_simpson(**dict(kwargs0, **kwarg_update))
|
||||
|
||||
def test_special_cases(self):
|
||||
# Test special cases not checked elsewhere
|
||||
rng = np.random.default_rng(82456839535679456794)
|
||||
y = rng.random(size=10)
|
||||
res = cumulative_simpson(y, dx=0)
|
||||
assert_equal(res, 0)
|
||||
|
||||
# Should add tests of:
|
||||
# - all elements of `x` identical
|
||||
# These should work as they do for `simpson`
|
||||
|
||||
def _get_theoretical_diff_between_simps_and_cum_simps(self, y, x):
|
||||
"""`cumulative_simpson` and `simpson` can be tested against other to verify
|
||||
they give consistent results. `simpson` will iteratively be called with
|
||||
successively higher upper limits of integration. This function calculates
|
||||
the theoretical correction required to `simpson` at even intervals to match
|
||||
with `cumulative_simpson`.
|
||||
"""
|
||||
d = np.diff(x, axis=-1)
|
||||
sub_integrals_h1 = _cumulative_simpson_unequal_intervals(y, d)
|
||||
sub_integrals_h2 = _cumulative_simpson_unequal_intervals(
|
||||
y[..., ::-1], d[..., ::-1]
|
||||
)[..., ::-1]
|
||||
|
||||
# Concatenate to build difference array
|
||||
zeros_shape = (*y.shape[:-1], 1)
|
||||
theoretical_difference = np.concatenate(
|
||||
[
|
||||
np.zeros(zeros_shape),
|
||||
(sub_integrals_h1[..., 1:] - sub_integrals_h2[..., :-1]),
|
||||
np.zeros(zeros_shape),
|
||||
],
|
||||
axis=-1,
|
||||
)
|
||||
# Differences only expected at even intervals. Odd intervals will
|
||||
# match exactly so there is no correction
|
||||
theoretical_difference[..., 1::2] = 0.0
|
||||
# Note: the first interval will not match from this correction as
|
||||
# `simpson` uses the trapezoidal rule
|
||||
return theoretical_difference
|
||||
|
||||
@pytest.mark.slow
|
||||
@given(
|
||||
y=hyp_num.arrays(
|
||||
np.float64,
|
||||
hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10),
|
||||
elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7)
|
||||
)
|
||||
)
|
||||
def test_cumulative_simpson_against_simpson_with_default_dx(
|
||||
self, y
|
||||
):
|
||||
"""Theoretically, the output of `cumulative_simpson` will be identical
|
||||
to `simpson` at all even indices and in the last index. The first index
|
||||
will not match as `simpson` uses the trapezoidal rule when there are only two
|
||||
data points. Odd indices after the first index are shown to match with
|
||||
a mathematically-derived correction."""
|
||||
def simpson_reference(y):
|
||||
return np.stack(
|
||||
[simpson(y[..., :i], dx=1.0) for i in range(2, y.shape[-1]+1)], axis=-1,
|
||||
)
|
||||
|
||||
res = cumulative_simpson(y, dx=1.0)
|
||||
ref = simpson_reference(y)
|
||||
theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps(
|
||||
y, x=np.arange(y.shape[-1])
|
||||
)
|
||||
np.testing.assert_allclose(
|
||||
res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:]
|
||||
)
|
||||
|
||||
@pytest.mark.slow
|
||||
@given(
|
||||
y=hyp_num.arrays(
|
||||
np.float64,
|
||||
hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10),
|
||||
elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7)
|
||||
)
|
||||
)
|
||||
def test_cumulative_simpson_against_simpson(
|
||||
self, y
|
||||
):
|
||||
"""Theoretically, the output of `cumulative_simpson` will be identical
|
||||
to `simpson` at all even indices and in the last index. The first index
|
||||
will not match as `simpson` uses the trapezoidal rule when there are only two
|
||||
data points. Odd indices after the first index are shown to match with
|
||||
a mathematically-derived correction."""
|
||||
interval = 10/(y.shape[-1] - 1)
|
||||
x = np.linspace(0, 10, num=y.shape[-1])
|
||||
x[1:] = x[1:] + 0.2*interval*np.random.uniform(-1, 1, len(x) - 1)
|
||||
|
||||
def simpson_reference(y, x):
|
||||
return np.stack(
|
||||
[simpson(y[..., :i], x=x[..., :i]) for i in range(2, y.shape[-1]+1)],
|
||||
axis=-1,
|
||||
)
|
||||
|
||||
res = cumulative_simpson(y, x=x)
|
||||
ref = simpson_reference(y, x)
|
||||
theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps(
|
||||
y, x
|
||||
)
|
||||
np.testing.assert_allclose(
|
||||
res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:]
|
||||
)
|
||||
@ -0,0 +1,947 @@
|
||||
# mypy: disable-error-code="attr-defined"
|
||||
import os
|
||||
import pytest
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_allclose, assert_equal
|
||||
|
||||
import scipy._lib._elementwise_iterative_method as eim
|
||||
from scipy import special, stats
|
||||
from scipy.integrate import quad_vec
|
||||
from scipy.integrate._tanhsinh import _tanhsinh, _pair_cache, _nsum
|
||||
from scipy.stats._discrete_distns import _gen_harmonic_gt1
|
||||
|
||||
class TestTanhSinh:
|
||||
|
||||
# Test problems from [1] Section 6
|
||||
def f1(self, t):
|
||||
return t * np.log(1 + t)
|
||||
|
||||
f1.ref = 0.25
|
||||
f1.b = 1
|
||||
|
||||
def f2(self, t):
|
||||
return t ** 2 * np.arctan(t)
|
||||
|
||||
f2.ref = (np.pi - 2 + 2 * np.log(2)) / 12
|
||||
f2.b = 1
|
||||
|
||||
def f3(self, t):
|
||||
return np.exp(t) * np.cos(t)
|
||||
|
||||
f3.ref = (np.exp(np.pi / 2) - 1) / 2
|
||||
f3.b = np.pi / 2
|
||||
|
||||
def f4(self, t):
|
||||
a = np.sqrt(2 + t ** 2)
|
||||
return np.arctan(a) / ((1 + t ** 2) * a)
|
||||
|
||||
f4.ref = 5 * np.pi ** 2 / 96
|
||||
f4.b = 1
|
||||
|
||||
def f5(self, t):
|
||||
return np.sqrt(t) * np.log(t)
|
||||
|
||||
f5.ref = -4 / 9
|
||||
f5.b = 1
|
||||
|
||||
def f6(self, t):
|
||||
return np.sqrt(1 - t ** 2)
|
||||
|
||||
f6.ref = np.pi / 4
|
||||
f6.b = 1
|
||||
|
||||
def f7(self, t):
|
||||
return np.sqrt(t) / np.sqrt(1 - t ** 2)
|
||||
|
||||
f7.ref = 2 * np.sqrt(np.pi) * special.gamma(3 / 4) / special.gamma(1 / 4)
|
||||
f7.b = 1
|
||||
|
||||
def f8(self, t):
|
||||
return np.log(t) ** 2
|
||||
|
||||
f8.ref = 2
|
||||
f8.b = 1
|
||||
|
||||
def f9(self, t):
|
||||
return np.log(np.cos(t))
|
||||
|
||||
f9.ref = -np.pi * np.log(2) / 2
|
||||
f9.b = np.pi / 2
|
||||
|
||||
def f10(self, t):
|
||||
return np.sqrt(np.tan(t))
|
||||
|
||||
f10.ref = np.pi * np.sqrt(2) / 2
|
||||
f10.b = np.pi / 2
|
||||
|
||||
def f11(self, t):
|
||||
return 1 / (1 + t ** 2)
|
||||
|
||||
f11.ref = np.pi / 2
|
||||
f11.b = np.inf
|
||||
|
||||
def f12(self, t):
|
||||
return np.exp(-t) / np.sqrt(t)
|
||||
|
||||
f12.ref = np.sqrt(np.pi)
|
||||
f12.b = np.inf
|
||||
|
||||
def f13(self, t):
|
||||
return np.exp(-t ** 2 / 2)
|
||||
|
||||
f13.ref = np.sqrt(np.pi / 2)
|
||||
f13.b = np.inf
|
||||
|
||||
def f14(self, t):
|
||||
return np.exp(-t) * np.cos(t)
|
||||
|
||||
f14.ref = 0.5
|
||||
f14.b = np.inf
|
||||
|
||||
def f15(self, t):
|
||||
return np.sin(t) / t
|
||||
|
||||
f15.ref = np.pi / 2
|
||||
f15.b = np.inf
|
||||
|
||||
def error(self, res, ref, log=False):
|
||||
err = abs(res - ref)
|
||||
|
||||
if not log:
|
||||
return err
|
||||
|
||||
with np.errstate(divide='ignore'):
|
||||
return np.log10(err)
|
||||
|
||||
def test_input_validation(self):
|
||||
f = self.f1
|
||||
|
||||
message = '`f` must be callable.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(42, 0, f.b)
|
||||
|
||||
message = '...must be True or False.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, log=2)
|
||||
|
||||
message = '...must be real numbers.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 1+1j, f.b)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, atol='ekki')
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, rtol=pytest)
|
||||
|
||||
message = '...must be non-negative and finite.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, rtol=-1)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, atol=np.inf)
|
||||
|
||||
message = '...may not be positive infinity.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, rtol=np.inf, log=True)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, atol=np.inf, log=True)
|
||||
|
||||
message = '...must be integers.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, maxlevel=object())
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, maxfun=1+1j)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, minlevel="migratory coconut")
|
||||
|
||||
message = '...must be non-negative.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, maxlevel=-1)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, maxfun=-1)
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, minlevel=-1)
|
||||
|
||||
message = '...must be True or False.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, preserve_shape=2)
|
||||
|
||||
message = '...must be callable.'
|
||||
with pytest.raises(ValueError, match=message):
|
||||
_tanhsinh(f, 0, f.b, callback='elderberry')
|
||||
|
||||
@pytest.mark.parametrize("limits, ref", [
|
||||
[(0, np.inf), 0.5], # b infinite
|
||||
[(-np.inf, 0), 0.5], # a infinite
|
||||
[(-np.inf, np.inf), 1], # a and b infinite
|
||||
[(np.inf, -np.inf), -1], # flipped limits
|
||||
[(1, -1), stats.norm.cdf(-1) - stats.norm.cdf(1)], # flipped limits
|
||||
])
|
||||
def test_integral_transforms(self, limits, ref):
|
||||
# Check that the integral transforms are behaving for both normal and
|
||||
# log integration
|
||||
dist = stats.norm()
|
||||
|
||||
res = _tanhsinh(dist.pdf, *limits)
|
||||
assert_allclose(res.integral, ref)
|
||||
|
||||
logres = _tanhsinh(dist.logpdf, *limits, log=True)
|
||||
assert_allclose(np.exp(logres.integral), ref)
|
||||
# Transformation should not make the result complex unnecessarily
|
||||
assert (np.issubdtype(logres.integral.dtype, np.floating) if ref > 0
|
||||
else np.issubdtype(logres.integral.dtype, np.complexfloating))
|
||||
|
||||
assert_allclose(np.exp(logres.error), res.error, atol=1e-16)
|
||||
|
||||
# 15 skipped intentionally; it's very difficult numerically
|
||||
@pytest.mark.parametrize('f_number', range(1, 15))
|
||||
def test_basic(self, f_number):
|
||||
f = getattr(self, f"f{f_number}")
|
||||
rtol = 2e-8
|
||||
res = _tanhsinh(f, 0, f.b, rtol=rtol)
|
||||
assert_allclose(res.integral, f.ref, rtol=rtol)
|
||||
if f_number not in {14}: # mildly underestimates error here
|
||||
true_error = abs(self.error(res.integral, f.ref)/res.integral)
|
||||
assert true_error < res.error
|
||||
|
||||
if f_number in {7, 10, 12}: # succeeds, but doesn't know it
|
||||
return
|
||||
|
||||
assert res.success
|
||||
assert res.status == 0
|
||||
|
||||
@pytest.mark.parametrize('ref', (0.5, [0.4, 0.6]))
|
||||
@pytest.mark.parametrize('case', stats._distr_params.distcont)
|
||||
def test_accuracy(self, ref, case):
|
||||
distname, params = case
|
||||
if distname in {'dgamma', 'dweibull', 'laplace', 'kstwo'}:
|
||||
# should split up interval at first-derivative discontinuity
|
||||
pytest.skip('tanh-sinh is not great for non-smooth integrands')
|
||||
if (distname in {'studentized_range', 'levy_stable'}
|
||||
and not int(os.getenv('SCIPY_XSLOW', 0))):
|
||||
pytest.skip('This case passes, but it is too slow.')
|
||||
dist = getattr(stats, distname)(*params)
|
||||
x = dist.interval(ref)
|
||||
res = _tanhsinh(dist.pdf, *x)
|
||||
assert_allclose(res.integral, ref)
|
||||
|
||||
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
|
||||
def test_vectorization(self, shape):
|
||||
# Test for correct functionality, output shapes, and dtypes for various
|
||||
# input shapes.
|
||||
rng = np.random.default_rng(82456839535679456794)
|
||||
a = rng.random(shape)
|
||||
b = rng.random(shape)
|
||||
p = rng.random(shape)
|
||||
n = np.prod(shape)
|
||||
|
||||
def f(x, p):
|
||||
f.ncall += 1
|
||||
f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1]
|
||||
return x**p
|
||||
f.ncall = 0
|
||||
f.feval = 0
|
||||
|
||||
@np.vectorize
|
||||
def _tanhsinh_single(a, b, p):
|
||||
return _tanhsinh(lambda x: x**p, a, b)
|
||||
|
||||
res = _tanhsinh(f, a, b, args=(p,))
|
||||
refs = _tanhsinh_single(a, b, p).ravel()
|
||||
|
||||
attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel']
|
||||
for attr in attrs:
|
||||
ref_attr = [getattr(ref, attr) for ref in refs]
|
||||
res_attr = getattr(res, attr)
|
||||
assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15)
|
||||
assert_equal(res_attr.shape, shape)
|
||||
|
||||
assert np.issubdtype(res.success.dtype, np.bool_)
|
||||
assert np.issubdtype(res.status.dtype, np.integer)
|
||||
assert np.issubdtype(res.nfev.dtype, np.integer)
|
||||
assert np.issubdtype(res.maxlevel.dtype, np.integer)
|
||||
assert_equal(np.max(res.nfev), f.feval)
|
||||
# maxlevel = 2 -> 3 function calls (2 initialization, 1 work)
|
||||
assert np.max(res.maxlevel) >= 2
|
||||
assert_equal(np.max(res.maxlevel), f.ncall)
|
||||
|
||||
def test_flags(self):
|
||||
# Test cases that should produce different status flags; show that all
|
||||
# can be produced simultaneously.
|
||||
def f(xs, js):
|
||||
f.nit += 1
|
||||
funcs = [lambda x: np.exp(-x**2), # converges
|
||||
lambda x: np.exp(x), # reaches maxiter due to order=2
|
||||
lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN
|
||||
res = [funcs[j](x) for x, j in zip(xs, js.ravel())]
|
||||
return res
|
||||
f.nit = 0
|
||||
|
||||
args = (np.arange(3, dtype=np.int64),)
|
||||
res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, args=args)
|
||||
ref_flags = np.array([0, -2, -3])
|
||||
assert_equal(res.status, ref_flags)
|
||||
|
||||
def test_flags_preserve_shape(self):
|
||||
# Same test as above but using `preserve_shape` option to simplify.
|
||||
def f(x):
|
||||
return [np.exp(-x[0]**2), # converges
|
||||
np.exp(x[1]), # reaches maxiter due to order=2
|
||||
np.full_like(x[2], np.nan)[()]] # stops due to NaN
|
||||
|
||||
res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, preserve_shape=True)
|
||||
ref_flags = np.array([0, -2, -3])
|
||||
assert_equal(res.status, ref_flags)
|
||||
|
||||
def test_preserve_shape(self):
|
||||
# Test `preserve_shape` option
|
||||
def f(x):
|
||||
return np.asarray([[x, np.sin(10 * x)],
|
||||
[np.cos(30 * x), x * np.sin(100 * x)]])
|
||||
|
||||
ref = quad_vec(f, 0, 1)
|
||||
res = _tanhsinh(f, 0, 1, preserve_shape=True)
|
||||
assert_allclose(res.integral, ref[0])
|
||||
|
||||
def test_convergence(self):
|
||||
# demonstrate that number of accurate digits doubles each iteration
|
||||
f = self.f1
|
||||
last_logerr = 0
|
||||
for i in range(4):
|
||||
res = _tanhsinh(f, 0, f.b, minlevel=0, maxlevel=i)
|
||||
logerr = self.error(res.integral, f.ref, log=True)
|
||||
assert (logerr < last_logerr * 2 or logerr < -15.5)
|
||||
last_logerr = logerr
|
||||
|
||||
def test_options_and_result_attributes(self):
    """Options behave as advertised; statuses/attributes are as intended."""
    # demonstrate that options are behaving as advertised and status
    # messages are as intended
    def f(x):
        # Count calls and total evaluations on the function object itself.
        f.calls += 1
        f.feval += np.size(x)
        return self.f2(x)
    f.ref = self.f2.ref
    f.b = self.f2.b
    default_rtol = 1e-12
    default_atol = f.ref * default_rtol  # effective default absolute tol

    # Test default options
    f.feval, f.calls = 0, 0
    ref = _tanhsinh(f, 0, f.b)
    assert self.error(ref.integral, f.ref) < ref.error < default_atol
    assert ref.nfev == f.feval
    ref.calls = f.calls  # reference number of function calls
    assert ref.success
    assert ref.status == 0

    # Test `maxlevel` equal to required max level
    # We should get all the same results
    f.feval, f.calls = 0, 0
    maxlevel = ref.maxlevel
    res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel)
    res.calls = f.calls
    assert res == ref

    # Now reduce the maximum level. We won't meet tolerances.
    f.feval, f.calls = 0, 0
    maxlevel -= 1
    assert maxlevel >= 2  # can't compare errors otherwise
    res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel)
    assert self.error(res.integral, f.ref) < res.error > default_atol
    assert res.nfev == f.feval < ref.nfev
    # One fewer level means exactly one fewer call to `f`.
    assert f.calls == ref.calls - 1
    assert not res.success
    assert res.status == eim._ECONVERR

    # `maxfun` is currently not enforced

    # # Test `maxfun` equal to required number of function evaluations
    # # We should get all the same results
    # f.feval, f.calls = 0, 0
    # maxfun = ref.nfev
    # res = _tanhsinh(f, 0, f.b, maxfun = maxfun)
    # assert res == ref
    #
    # # Now reduce `maxfun`. We won't meet tolerances.
    # f.feval, f.calls = 0, 0
    # maxfun -= 1
    # res = _tanhsinh(f, 0, f.b, maxfun=maxfun)
    # assert self.error(res.integral, f.ref) < res.error > default_atol
    # assert res.nfev == f.feval < ref.nfev
    # assert f.calls == ref.calls - 1
    # assert not res.success
    # assert res.status == 2

    # Take this result to be the new reference
    ref = res
    ref.calls = f.calls

    # Test `atol`
    f.feval, f.calls = 0, 0
    # With this tolerance, we should get the exact same result as ref
    atol = np.nextafter(ref.error, np.inf)
    res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol)
    assert res.integral == ref.integral
    assert res.error == ref.error
    assert res.nfev == f.feval == ref.nfev
    assert f.calls == ref.calls
    # Except the result is considered to be successful
    assert res.success
    assert res.status == 0

    f.feval, f.calls = 0, 0
    # With a tighter tolerance, we should get a more accurate result
    atol = np.nextafter(ref.error, -np.inf)
    res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol)
    assert self.error(res.integral, f.ref) < res.error < atol
    assert res.nfev == f.feval > ref.nfev
    assert f.calls > ref.calls
    assert res.success
    assert res.status == 0

    # Test `rtol`
    f.feval, f.calls = 0, 0
    # With this tolerance, we should get the exact same result as ref
    rtol = np.nextafter(ref.error/ref.integral, np.inf)
    res = _tanhsinh(f, 0, f.b, rtol=rtol)
    assert res.integral == ref.integral
    assert res.error == ref.error
    assert res.nfev == f.feval == ref.nfev
    assert f.calls == ref.calls
    # Except the result is considered to be successful
    assert res.success
    assert res.status == 0

    f.feval, f.calls = 0, 0
    # With a tighter tolerance, we should get a more accurate result
    rtol = np.nextafter(ref.error/ref.integral, -np.inf)
    res = _tanhsinh(f, 0, f.b, rtol=rtol)
    assert self.error(res.integral, f.ref)/f.ref < res.error/res.integral < rtol
    assert res.nfev == f.feval > ref.nfev
    assert f.calls > ref.calls
    assert res.success
    assert res.status == 0
|
||||
|
||||
@pytest.mark.parametrize('rtol', [1e-4, 1e-14])
def test_log(self, rtol):
    """Log-integration is equivalent to regular integration."""
    # Test equivalence of log-integration and regular integration
    dist = stats.norm()

    test_tols = dict(atol=1e-18, rtol=1e-15)

    # Positive integrand (real log-integrand)
    res = _tanhsinh(dist.logpdf, -1, 2, log=True, rtol=np.log(rtol))
    ref = _tanhsinh(dist.pdf, -1, 2, rtol=rtol)
    assert_allclose(np.exp(res.integral), ref.integral, **test_tols)
    assert_allclose(np.exp(res.error), ref.error, **test_tols)
    assert res.nfev == ref.nfev

    # Real integrand (complex log-integrand)
    def f(x):
        # -logpdf * pdf is positive (logpdf < 0 everywhere for a normal).
        return -dist.logpdf(x)*dist.pdf(x)

    def logf(x):
        # log of f with the sign carried in the imaginary part:
        # log(logpdf + 0j) = log|logpdf| + i*pi, so exp(logf) == f.
        return np.log(dist.logpdf(x) + 0j) + dist.logpdf(x) + np.pi * 1j

    res = _tanhsinh(logf, -np.inf, np.inf, log=True)
    ref = _tanhsinh(f, -np.inf, np.inf)
    # In gh-19173, we saw `invalid` warnings on one CI platform.
    # Silencing `all` because I can't reproduce locally and don't want
    # to risk the need to run CI again.
    with np.errstate(all='ignore'):
        assert_allclose(np.exp(res.integral), ref.integral, **test_tols)
        assert_allclose(np.exp(res.error), ref.error, **test_tols)
    assert res.nfev == ref.nfev
|
||||
|
||||
def test_complex(self):
    """Complex-valued integrands work over finite and infinite limits."""
    # Finite limits: int_0^{pi/4} exp(i*x) dx = sin(pi/4) + i*(1 - cos(pi/4)).
    def unit_phasor(x):
        return np.exp(1j * x)

    res = _tanhsinh(unit_phasor, 0, np.pi/4)
    half_sqrt2 = np.sqrt(2)/2
    expected = half_sqrt2 + (1 - half_sqrt2)*1j
    assert_allclose(res.integral, expected)

    # Infinite limits, reversed orientation: each pdf integrates to 1,
    # so integrating from +inf to -inf gives -(1 + 1j).
    narrow = stats.norm(scale=1)
    wide = stats.norm(scale=2)

    def complex_pdf(x):
        return narrow.pdf(x) + 1j*wide.pdf(x)

    res = _tanhsinh(complex_pdf, np.inf, -np.inf)
    assert_allclose(res.integral, -(1+1j))
|
||||
|
||||
@pytest.mark.parametrize("maxlevel", range(4))
def test_minlevel(self, maxlevel):
    """`minlevel` affects only the number of calls, not points or results."""
    # Verify that minlevel does not change the values at which the
    # integrand is evaluated or the integral/error estimates, only the
    # number of function calls
    def f(x):
        # Track call count, total evaluations, and every abscissa seen.
        f.calls += 1
        f.feval += np.size(x)
        f.x = np.concatenate((f.x, x.ravel()))
        return self.f2(x)
    f.feval, f.calls, f.x = 0, 0, np.array([])

    ref = _tanhsinh(f, 0, self.f2.b, minlevel=0, maxlevel=maxlevel)
    ref_x = np.sort(f.x)

    for minlevel in range(0, maxlevel + 1):
        # Reset the counters before each run.
        f.feval, f.calls, f.x = 0, 0, np.array([])
        options = dict(minlevel=minlevel, maxlevel=maxlevel)
        res = _tanhsinh(f, 0, self.f2.b, **options)
        # Should be very close; all that has changed is the order of values
        assert_allclose(res.integral, ref.integral, rtol=4e-16)
        # Difference in absolute errors << magnitude of integral
        assert_allclose(res.error, ref.error, atol=4e-16 * ref.integral)
        assert res.nfev == f.feval == len(f.x)
        assert f.calls == maxlevel - minlevel + 1 + 1  # 1 validation call
        assert res.status == ref.status
        # Same abscissae overall, possibly visited in a different order.
        assert_equal(ref_x, np.sort(f.x))
|
||||
|
||||
def test_improper_integrals(self):
    """Infinite limits — alone or mixed with finite limits — are handled."""
    def gaussian(x):
        # Guard: infinite abscissae must never reach the integrand.
        x[np.isinf(x)] = np.nan
        return np.exp(-x**2)

    lower = [-np.inf, 0, -np.inf, np.inf, -20, -np.inf, -20]
    upper = [np.inf, np.inf, 0, -np.inf, 20, 20, np.inf]
    full = np.sqrt(np.pi)  # integral of exp(-x^2) over the whole real line
    expected = [full, full/2, full/2, -full, full, full, full]
    res = _tanhsinh(gaussian, lower, upper)
    assert_allclose(res.integral, expected)
|
||||
|
||||
@pytest.mark.parametrize("limits", ((0, 3), ([-np.inf, 0], [3, 3])))
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_dtype(self, limits, dtype):
    """The dtype of the limits is preserved through evaluation and results."""
    lo, hi = np.asarray(limits, dtype=dtype)[()]

    def exp_checked(x):
        # The integrand must be evaluated at the limits' dtype.
        assert x.dtype == dtype
        return np.exp(x)

    rtol = 1e-12 if dtype == np.float64 else 1e-5
    res = _tanhsinh(exp_checked, lo, hi, rtol=rtol)
    assert res.integral.dtype == dtype
    assert res.error.dtype == dtype
    assert np.all(res.success)
    # The antiderivative of exp is exp.
    assert_allclose(res.integral, np.exp(hi) - np.exp(lo), rtol=rtol)
|
||||
|
||||
def test_maxiter_callback(self):
    """`maxiter` and `callback` interact as documented."""
    # Test behavior of `maxiter` parameter and `callback` interface
    a, b = -np.inf, np.inf
    def f(x):
        return np.exp(-x*x)

    minlevel, maxlevel = 0, 2
    maxiter = maxlevel - minlevel + 1
    kwargs = dict(minlevel=minlevel, maxlevel=maxlevel, rtol=1e-15)
    # First run: terminate because `maxlevel` is reached.
    res = _tanhsinh(f, a, b, **kwargs)
    assert not res.success
    assert res.maxlevel == maxlevel

    def callback(res):
        # Record iteration count and latest intermediate result.
        callback.iter += 1
        callback.res = res
        assert hasattr(res, 'integral')
        assert res.status == 1
        # Raising StopIteration terminates the solver from the callback.
        if callback.iter == maxiter:
            raise StopIteration
    callback.iter = -1  # callback called once before first iteration
    callback.res = None

    del kwargs['maxlevel']
    # Second run: terminate via the callback after the same number of
    # iterations.
    res2 = _tanhsinh(f, a, b, **kwargs, callback=callback)
    # terminating with callback is identical to terminating due to maxiter
    # (except for `status`)
    for key in res.keys():
        if key == 'status':
            assert callback.res[key] == 1   # still iterating when recorded
            assert res[key] == -2           # terminated due to maxiter
            assert res2[key] == -4          # terminated by callback
        else:
            assert res2[key] == callback.res[key] == res[key]
|
||||
|
||||
def test_jumpstart(self):
    """Intermediate results at level i equal a run with minlevel=maxlevel=i."""
    lo, hi = -np.inf, np.inf

    def gaussian(x):
        return np.exp(-x*x)

    # Record the estimate after every level of one progressive run.
    def recorder(res):
        recorder.integrals.append(res.integral)
        recorder.errors.append(res.error)
    recorder.integrals = []
    recorder.errors = []

    top_level = 4
    _tanhsinh(gaussian, lo, hi, minlevel=0, maxlevel=top_level,
              callback=recorder)

    # Re-run once per level, "jumpstarting" directly at that level.
    jump_integrals = []
    jump_errors = []
    for level in range(top_level + 1):
        res = _tanhsinh(gaussian, lo, hi, minlevel=level, maxlevel=level)
        jump_integrals.append(res.integral)
        jump_errors.append(res.error)

    # Skip the recorder's first entry: the callback fires once before
    # the first iteration.
    assert_allclose(recorder.integrals[1:], jump_integrals, rtol=1e-15)
    assert_allclose(recorder.errors[1:], jump_errors, rtol=1e-15, atol=1e-16)
|
||||
|
||||
def test_special_cases(self):
    """Edge cases: int inputs, low levels, equal limits, NaNs, caching."""
    # Test edge cases and other special cases

    # Test that integers are not passed to `f`
    # (otherwise this would overflow)
    def f(x):
        assert np.issubdtype(x.dtype, np.floating)
        return x ** 99

    res = _tanhsinh(f, 0, 1)
    assert res.success
    assert_allclose(res.integral, 1/100)

    # Test levels 0 and 1; error is NaN
    res = _tanhsinh(f, 0, 1, maxlevel=0)
    assert res.integral > 0
    assert_equal(res.error, np.nan)
    res = _tanhsinh(f, 0, 1, maxlevel=1)
    assert res.integral > 0
    assert_equal(res.error, np.nan)

    # Test equal left and right integration limits
    res = _tanhsinh(f, 1, 1)
    assert res.success
    assert res.maxlevel == -1
    assert_allclose(res.integral, 0)

    # Test scalar `args` (not in tuple)
    def f(x, c):
        return x**c

    res = _tanhsinh(f, 0, 1, args=99)
    assert_allclose(res.integral, 1/100)

    # Test NaNs
    a = [np.nan, 0, 0, 0]
    b = [1, np.nan, 1, 1]
    c = [1, 1, np.nan, 1]
    res = _tanhsinh(f, a, b, args=(c,))
    assert_allclose(res.integral, [np.nan, np.nan, np.nan, 0.5])
    assert_allclose(res.error[:3], np.nan)
    assert_equal(res.status, [-3, -3, -3, 0])
    assert_equal(res.success, [False, False, False, True])
    assert_equal(res.nfev[:3], 1)

    # Test complex integral followed by real integral
    # Previously, h0 was of the result dtype. If the `dtype` were complex,
    # this could lead to complex cached abscissae/weights. If these get
    # cast to real dtype for a subsequent real integral, we would get a
    # ComplexWarning. Check that this is avoided.
    # Reset the module-level abscissa/weight cache to a pristine state.
    _pair_cache.xjc = np.empty(0)
    _pair_cache.wj = np.empty(0)
    _pair_cache.indices = [0]
    _pair_cache.h0 = None
    res = _tanhsinh(lambda x: x*1j, 0, 1)
    assert_allclose(res.integral, 0.5*1j)
    res = _tanhsinh(lambda x: x, 0, 1)
    assert_allclose(res.integral, 0.5)

    # Test zero-size
    shape = (0, 3)
    res = _tanhsinh(lambda x: x, 0, np.zeros(shape))
    attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel']
    for attr in attrs:
        assert_equal(res[attr].shape, shape)
|
||||
|
||||
|
||||
class TestNSum:
    """Tests for `_nsum` (sum of a function over integers/steps)."""
    rng = np.random.default_rng(5895448232066142650)
    p = rng.uniform(1, 10, size=10)  # random exponents > 1 (sums converge)

    def f1(self, k):
        # Integers are never passed to `f1`; if they were, we'd get
        # integer to negative integer power error
        return k**(-2)

    # Reference data attached to the function object: exact sum (Basel
    # problem), limits, and extra args.
    f1.ref = np.pi**2/6
    f1.a = 1
    f1.b = np.inf
    f1.args = tuple()

    def f2(self, k, p):
        return 1 / k**p

    # Reference: Hurwitz/Riemann zeta at each exponent `p`.
    f2.ref = special.zeta(p, 1)
    f2.a = 1
    f2.b = np.inf
    f2.args = (p,)

    def f3(self, k, p):
        return 1 / k**p

    # Finite upper limits; reference is a generalized harmonic number.
    f3.a = 1
    f3.b = rng.integers(5, 15, size=(3, 1))
    f3.ref = _gen_harmonic_gt1(f3.b, p)
    f3.args = (p,)

    def test_input_validation(self):
        """Invalid arguments raise with the documented messages."""
        f = self.f1

        message = '`f` must be callable.'
        with pytest.raises(ValueError, match=message):
            _nsum(42, f.a, f.b)

        message = '...must be True or False.'
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, log=2)

        message = '...must be real numbers.'
        with pytest.raises(ValueError, match=message):
            _nsum(f, 1+1j, f.b)
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, None)
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, step=object())
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, atol='ekki')
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, rtol=pytest)

        # Non-finite/invalid limits and steps don't raise; they produce
        # status -1 elementwise.
        with np.errstate(all='ignore'):
            res = _nsum(f, [np.nan, -np.inf, np.inf], 1)
            assert np.all((res.status == -1) & np.isnan(res.sum)
                          & np.isnan(res.error) & ~res.success & res.nfev == 1)
            res = _nsum(f, 10, [np.nan, 1])
            assert np.all((res.status == -1) & np.isnan(res.sum)
                          & np.isnan(res.error) & ~res.success & res.nfev == 1)
            res = _nsum(f, 1, 10, step=[np.nan, -np.inf, np.inf, -1, 0])
            assert np.all((res.status == -1) & np.isnan(res.sum)
                          & np.isnan(res.error) & ~res.success & res.nfev == 1)

        message = '...must be non-negative and finite.'
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, rtol=-1)
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, atol=np.inf)

        message = '...may not be positive infinity.'
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, rtol=np.inf, log=True)
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, atol=np.inf, log=True)

        message = '...must be a non-negative integer.'
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, maxterms=3.5)
        with pytest.raises(ValueError, match=message):
            _nsum(f, f.a, f.b, maxterms=-2)

    @pytest.mark.parametrize('f_number', range(1, 4))
    def test_basic(self, f_number):
        """Each reference summand matches its known reference value,
        in both linear and log space."""
        f = getattr(self, f"f{f_number}")
        res = _nsum(f, f.a, f.b, args=f.args)
        assert_allclose(res.sum, f.ref)
        assert_equal(res.status, 0)
        assert_equal(res.success, True)

        with np.errstate(divide='ignore'):
            logres = _nsum(lambda *args: np.log(f(*args)),
                           f.a, f.b, log=True, args=f.args)
        assert_allclose(np.exp(logres.sum), res.sum)
        assert_allclose(np.exp(logres.error), res.error)
        assert_equal(logres.status, 0)
        assert_equal(logres.success, True)

    @pytest.mark.parametrize('maxterms', [0, 1, 10, 20, 100])
    def test_integral(self, maxterms):
        # test precise behavior of integral approximation
        f = self.f1

        def logf(x):
            return -2*np.log(x)

        def F(x):
            # Antiderivative of f1(x) = x**-2.
            return -1 / x

        # Broadcastable grids of lower limits, upper limits, and steps.
        a = np.asarray([1, 5])[:, np.newaxis]
        b = np.asarray([20, 100, np.inf])[:, np.newaxis, np.newaxis]
        step = np.asarray([0.5, 1, 2]).reshape((-1, 1, 1, 1))
        nsteps = np.floor((b - a)/step)
        b_original = b
        # Snap `b` to the last term actually included by the step.
        b = a + nsteps*step

        k = a + maxterms*step
        # partial sum
        direct = f(a + np.arange(maxterms)*step).sum(axis=-1, keepdims=True)
        integral = (F(b) - F(k))/step  # integral approximation of remainder
        low = direct + integral + f(b)  # theoretical lower bound
        high = direct + integral + f(k)  # theoretical upper bound
        ref_sum = (low + high)/2  # _nsum uses average of the two
        ref_err = (high - low)/2  # error (assuming perfect quadrature)

        # correct reference values where number of terms < maxterms
        a, b, step = np.broadcast_arrays(a, b, step)
        for i in np.ndindex(a.shape):
            ai, bi, stepi = a[i], b[i], step[i]
            if (bi - ai)/stepi + 1 <= maxterms:
                # Few enough terms: the sum is computed directly.
                direct = f(np.arange(ai, bi+stepi, stepi)).sum()
                ref_sum[i] = direct
                ref_err[i] = direct * np.finfo(direct).eps

        rtol = 1e-12
        res = _nsum(f, a, b_original, step=step, maxterms=maxterms, rtol=rtol)
        assert_allclose(res.sum, ref_sum, rtol=10*rtol)
        assert_allclose(res.error, ref_err, rtol=100*rtol)
        assert_equal(res.status, 0)
        assert_equal(res.success, True)

        # Where the direct method applies, agreement is much tighter.
        i = ((b_original - a)/step + 1 <= maxterms)
        assert_allclose(res.sum[i], ref_sum[i], rtol=1e-15)
        assert_allclose(res.error[i], ref_err[i], rtol=1e-15)

        # Log-space call must agree with the linear-space result.
        logres = _nsum(logf, a, b_original, step=step, log=True,
                       rtol=np.log(rtol), maxterms=maxterms)
        assert_allclose(np.exp(logres.sum), res.sum)
        assert_allclose(np.exp(logres.error), res.error)
        assert_equal(logres.status, 0)
        assert_equal(logres.success, True)

    @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
    def test_vectorization(self, shape):
        # Test for correct functionality, output shapes, and dtypes for various
        # input shapes.
        rng = np.random.default_rng(82456839535679456794)
        a = rng.integers(1, 10, size=shape)
        # when the sum can be computed directly or `maxterms` is large enough
        # to meet `atol`, there are slight differences (for good reason)
        # between vectorized call and looping.
        b = np.inf
        p = rng.random(shape) + 1
        n = np.prod(shape)

        def f(x, p):
            f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1]
            return 1 / x ** p

        f.feval = 0

        @np.vectorize
        def _nsum_single(a, b, p, maxterms):
            # Scalar reference: one independent `_nsum` call per element.
            return _nsum(lambda x: 1 / x**p, a, b, maxterms=maxterms)

        res = _nsum(f, a, b, maxterms=1000, args=(p,))
        refs = _nsum_single(a, b, p, maxterms=1000).ravel()

        attrs = ['sum', 'error', 'success', 'status', 'nfev']
        for attr in attrs:
            ref_attr = [getattr(ref, attr) for ref in refs]
            res_attr = getattr(res, attr)
            assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15)
            assert_equal(res_attr.shape, shape)

        assert np.issubdtype(res.success.dtype, np.bool_)
        assert np.issubdtype(res.status.dtype, np.integer)
        assert np.issubdtype(res.nfev.dtype, np.integer)
        assert_equal(np.max(res.nfev), f.feval)

    def test_status(self):
        """Each failure mode is reported with the right status code."""
        f = self.f2

        # Cases: a=0 (division by zero), a=0 again with infinite b,
        # p<1 (divergent), and one convergent case.
        p = [2, 2, 0.9, 1.1]
        a = [0, 0, 1, 1]
        b = [10, np.inf, np.inf, np.inf]
        ref = special.zeta(p, 1)

        with np.errstate(divide='ignore'):  # intentionally dividing by zero
            res = _nsum(f, a, b, args=(p,))

        assert_equal(res.success, [False, False, False, True])
        assert_equal(res.status, [-3, -3, -2, 0])
        assert_allclose(res.sum[res.success], ref[res.success])

    def test_nfev(self):
        """`res.nfev` reports the actual number of integrand evaluations."""
        def f(x):
            f.nfev += np.size(x)
            return 1 / x**2

        f.nfev = 0
        res = _nsum(f, 1, 10)
        assert_equal(res.nfev, f.nfev)

        f.nfev = 0
        res = _nsum(f, 1, np.inf, atol=1e-6)
        assert_equal(res.nfev, f.nfev)

    def test_inclusive(self):
        # There was an edge case off-by one bug when `_direct` was called with
        # `inclusive=True`. Check that this is resolved.
        res = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf, maxterms=500, atol=0.1)
        ref = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf)
        assert np.all(res.sum > (ref.sum - res.error))
        assert np.all(res.sum < (ref.sum + res.error))

    def test_special_case(self):
        # test equal lower/upper limit
        f = self.f1
        a = b = 2
        res = _nsum(f, a, b)
        assert_equal(res.sum, f(a))

        # Test scalar `args` (not in tuple)
        res = _nsum(self.f2, 1, np.inf, args=2)
        assert_allclose(res.sum, self.f1.ref)  # f1.ref is correct w/ args=2

        # Test 0 size input
        a = np.empty((3, 1, 1))  # arbitrary broadcastable shapes
        b = np.empty((0, 1))  # could use Hypothesis
        p = np.empty(4)  # but it's overkill
        shape = np.broadcast_shapes(a.shape, b.shape, p.shape)
        res = _nsum(self.f2, a, b, args=(p,))
        assert res.sum.shape == shape
        assert res.status.shape == shape
        assert res.nfev.shape == shape

        # Test maxterms=0
        def f(x):
            with np.errstate(divide='ignore'):
                return 1 / x

        res = _nsum(f, 0, 10, maxterms=0)
        assert np.isnan(res.sum)
        assert np.isnan(res.error)
        assert res.status == -2

        res = _nsum(f, 0, 10, maxterms=1)
        assert np.isnan(res.sum)
        assert np.isnan(res.error)
        assert res.status == -3

        # Test NaNs
        # should skip both direct and integral methods if there are NaNs
        a = [np.nan, 1, 1, 1]
        b = [np.inf, np.nan, np.inf, np.inf]
        p = [2, 2, np.nan, 2]
        res = _nsum(self.f2, a, b, args=(p,))
        assert_allclose(res.sum, [np.nan, np.nan, np.nan, self.f1.ref])
        assert_allclose(res.error[:3], np.nan)
        assert_equal(res.status, [-1, -1, -3, 0])
        assert_equal(res.success, [False, False, False, True])
        # Ideally res.nfev[2] would be 1, but `tanhsinh` has some function evals
        assert_equal(res.nfev[:2], 1)

    @pytest.mark.parametrize('dtype', [np.float32, np.float64])
    def test_dtype(self, dtype):
        """The dtype of the limits is preserved in sum and error."""
        def f(k):
            assert k.dtype == dtype
            return 1 / k ** np.asarray(2, dtype=dtype)[()]

        a = np.asarray(1, dtype=dtype)
        b = np.asarray([10, np.inf], dtype=dtype)
        res = _nsum(f, a, b)
        assert res.sum.dtype == dtype
        assert res.error.dtype == dtype

        rtol = 1e-12 if dtype == np.float64 else 1e-6
        ref = _gen_harmonic_gt1(b, 2)
        assert_allclose(res.sum, ref, rtol=rtol)
|
||||
Reference in New Issue
Block a user