asd

venv/lib/python3.12/site-packages/scipy/interpolate/__init__.py (new file, 201 lines)
@@ -0,0 +1,201 @@
"""
========================================
Interpolation (:mod:`scipy.interpolate`)
========================================

.. currentmodule:: scipy.interpolate

Sub-package for objects used in interpolation.

As listed below, this sub-package contains spline functions and classes,
1-D and multidimensional (univariate and multivariate)
interpolation classes, Lagrange and Taylor polynomial interpolators, and
wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
and DFITPACK functions.

Univariate interpolation
========================

.. autosummary::
   :toctree: generated/

   interp1d
   BarycentricInterpolator
   KroghInterpolator
   barycentric_interpolate
   krogh_interpolate
   pchip_interpolate
   CubicHermiteSpline
   PchipInterpolator
   Akima1DInterpolator
   CubicSpline
   PPoly
   BPoly


Multivariate interpolation
==========================

Unstructured data:

.. autosummary::
   :toctree: generated/

   griddata
   LinearNDInterpolator
   NearestNDInterpolator
   CloughTocher2DInterpolator
   RBFInterpolator
   Rbf
   interp2d

For data on a grid:

.. autosummary::
   :toctree: generated/

   interpn
   RegularGridInterpolator
   RectBivariateSpline

.. seealso::

   `scipy.ndimage.map_coordinates`

Tensor product polynomials:

.. autosummary::
   :toctree: generated/

   NdPPoly
   NdBSpline

1-D Splines
===========

.. autosummary::
   :toctree: generated/

   BSpline
   make_interp_spline
   make_lsq_spline
   make_smoothing_spline

Functional interface to FITPACK routines:

.. autosummary::
   :toctree: generated/

   splrep
   splprep
   splev
   splint
   sproot
   spalde
   splder
   splantider
   insert

Object-oriented FITPACK interface:

.. autosummary::
   :toctree: generated/

   UnivariateSpline
   InterpolatedUnivariateSpline
   LSQUnivariateSpline



2-D Splines
===========

For data on a grid:

.. autosummary::
   :toctree: generated/

   RectBivariateSpline
   RectSphereBivariateSpline

For unstructured data:

.. autosummary::
   :toctree: generated/

   BivariateSpline
   SmoothBivariateSpline
   SmoothSphereBivariateSpline
   LSQBivariateSpline
   LSQSphereBivariateSpline

Low-level interface to FITPACK functions:

.. autosummary::
   :toctree: generated/

   bisplrep
   bisplev

Additional tools
================

.. autosummary::
   :toctree: generated/

   lagrange
   approximate_taylor_polynomial
   pade

.. seealso::

   `scipy.ndimage.map_coordinates`,
   `scipy.ndimage.spline_filter`,
   `scipy.signal.resample`,
   `scipy.signal.bspline`,
   `scipy.signal.gauss_spline`,
   `scipy.signal.qspline1d`,
   `scipy.signal.cspline1d`,
   `scipy.signal.qspline1d_eval`,
   `scipy.signal.cspline1d_eval`,
   `scipy.signal.qspline2d`,
   `scipy.signal.cspline2d`.

``pchip`` is an alias of `PchipInterpolator` for backward compatibility
(should not be used in new code).
"""
from ._interpolate import *
from ._fitpack_py import *

# New interface to fitpack library:
from ._fitpack2 import *

from ._rbf import Rbf

from ._rbfinterp import *

from ._polyint import *

from ._cubic import *

from ._ndgriddata import *

from ._bsplines import *

from ._pade import *

from ._rgi import *

from ._ndbspline import NdBSpline

# Deprecated namespaces, to be removed in v2.0.0
from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf

__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

# Backward compatibility
pchip = PchipInterpolator
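# Illustrative sketch (not part of the original module): a minimal use of the
# univariate interpolators catalogued in the docstring above. It assumes SciPy
# is installed; the tolerances are only indicative.
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import CubicSpline, make_interp_spline
#     >>> x = np.linspace(0.0, 2.0 * np.pi, 10)
#     >>> y = np.sin(x)
#     >>> cs = CubicSpline(x, y)               # piecewise-cubic PPoly
#     >>> spl = make_interp_spline(x, y, k=3)  # equivalent cubic B-spline
#     >>> bool(abs(cs(1.5) - np.sin(1.5)) < 1e-2)
#     True
#     >>> bool(abs(spl(1.5) - cs(1.5)) < 1e-8)
#     True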
Binary file not shown.

venv/lib/python3.12/site-packages/scipy/interpolate/_bsplines.py (new file, 2221 lines)
File diff suppressed because it is too large.

venv/lib/python3.12/site-packages/scipy/interpolate/_cubic.py (new file, 980 lines)
@@ -0,0 +1,980 @@
"""Interpolation algorithms using piecewise cubic polynomials."""

from __future__ import annotations

from typing import TYPE_CHECKING

import warnings

import numpy as np

from scipy.linalg import solve, solve_banded

from . import PPoly
from ._polyint import _isscalar

if TYPE_CHECKING:
    from typing import Literal

__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
           "Akima1DInterpolator", "CubicSpline"]


def prepare_input(x, y, axis, dydx=None):
    """Prepare input for cubic spline interpolators.

    All data are converted to numpy arrays and checked for correctness.
    Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
    axis. The value of `axis` is converted to lie in
    [0, number of dimensions of `y`).
    """

    x, y = map(np.asarray, (x, y))
    if np.issubdtype(x.dtype, np.complexfloating):
        raise ValueError("`x` must contain real values.")
    x = x.astype(float)

    if np.issubdtype(y.dtype, np.complexfloating):
        dtype = complex
    else:
        dtype = float

    if dydx is not None:
        dydx = np.asarray(dydx)
        if y.shape != dydx.shape:
            raise ValueError("The shapes of `y` and `dydx` must be identical.")
        if np.issubdtype(dydx.dtype, np.complexfloating):
            dtype = complex
        dydx = dydx.astype(dtype, copy=False)

    y = y.astype(dtype, copy=False)
    axis = axis % y.ndim
    if x.ndim != 1:
        raise ValueError("`x` must be 1-dimensional.")
    if x.shape[0] < 2:
        raise ValueError("`x` must contain at least 2 elements.")
    if x.shape[0] != y.shape[axis]:
        raise ValueError(f"The length of `y` along `axis`={axis} doesn't "
                         "match the length of `x`")

    if not np.all(np.isfinite(x)):
        raise ValueError("`x` must contain only finite values.")
    if not np.all(np.isfinite(y)):
        raise ValueError("`y` must contain only finite values.")

    if dydx is not None and not np.all(np.isfinite(dydx)):
        raise ValueError("`dydx` must contain only finite values.")

    dx = np.diff(x)
    if np.any(dx <= 0):
        raise ValueError("`x` must be strictly increasing sequence.")

    y = np.moveaxis(y, axis, 0)
    if dydx is not None:
        dydx = np.moveaxis(dydx, axis, 0)

    return x, dx, y, axis, dydx


class CubicHermiteSpline(PPoly):
    """Piecewise-cubic interpolator matching values and first derivatives.

    The result is represented as a `PPoly` instance.

    Parameters
    ----------
    x : array_like, shape (n,)
        1-D array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    dydx : array_like
        Array containing derivatives of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), it is set to True.

    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots

    See Also
    --------
    Akima1DInterpolator : Akima 1D interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    If you want to create a higher-order spline matching higher-order
    derivatives, use `BPoly.from_derivatives`.

    References
    ----------
    .. [1] `Cubic Hermite spline
           <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
           on Wikipedia.
    """

    def __init__(self, x, y, dydx, axis=0, extrapolate=None):
        if extrapolate is None:
            extrapolate = True

        x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)

        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
        slope = np.diff(y, axis=0) / dxr
        t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr

        c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
        c[0] = t / dxr
        c[1] = (slope - dydx[:-1]) / dxr - t
        c[2] = dydx[:-1]
        c[3] = y[:-1]

        super().__init__(c, x, extrapolate=extrapolate)
        self.axis = axis

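# Illustrative sketch (not part of the original module): the constructor above
# stores, for each interval [x[i], x[i+1]], the coefficients of
#     p_i(s) = c[0, i]*s**3 + c[1, i]*s**2 + c[2, i]*s + c[3, i],  s = x - x[i],
# chosen so that p_i matches the supplied values and first derivatives at both
# interval ends. A quick doctest-style check (assumes SciPy is installed):
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import CubicHermiteSpline
#     >>> x = np.array([0.0, 1.0, 2.0])
#     >>> y, dydx = x**2, 2*x          # exact values and slopes of f(x) = x**2
#     >>> h = CubicHermiteSpline(x, y, dydx)
#     >>> bool(np.allclose(h(x), y)) and bool(np.allclose(h(x, 1), dydx))
#     True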
class PchipInterpolator(CubicHermiteSpline):
    r"""PCHIP 1-D monotonic cubic interpolation.

    ``x`` and ``y`` are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).

    Parameters
    ----------
    x : ndarray, shape (npoints, )
        A 1-D array of monotonically increasing real values. ``x`` cannot
        include duplicate values (otherwise f is overspecified).
    y : ndarray, shape (..., npoints, ...)
        An N-D array of real values. ``y``'s length along the interpolation
        axis must be equal to the length of ``x``. Use the ``axis``
        parameter to select the interpolation axis.

        .. deprecated:: 1.13.0
            Complex data is deprecated and will raise an error in SciPy 1.15.0.
            If you are trying to use the real components of the passed array,
            use ``np.real`` on ``y``.

    axis : int, optional
        Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
        to ``axis=0``.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Methods
    -------
    __call__
    derivative
    antiderivative
    roots

    See Also
    --------
    CubicHermiteSpline : Piecewise-cubic interpolator.
    Akima1DInterpolator : Akima 1D interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.

    Notes
    -----
    The interpolator preserves monotonicity in the interpolation data and does
    not overshoot if the data is not smooth.

    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at :math:`x_k`.

    The derivatives at the points :math:`x_k`, :math:`f'_k`, are determined
    using the PCHIP algorithm [1]_.

    Let :math:`h_k = x_{k+1} - x_k` and :math:`d_k = (y_{k+1} - y_k) / h_k`
    be the slopes at internal points :math:`x_k`.
    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
    weighted harmonic mean

    .. math::

        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}

    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.

    The end slopes are set using a one-sided scheme [2]_.


    References
    ----------
    .. [1] F. N. Fritsch and J. Butland,
           A method for constructing local
           monotone piecewise cubic interpolants,
           SIAM J. Sci. Comput., 5(2), 300-304 (1984).
           :doi:`10.1137/0905021`.
    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
           :doi:`10.1137/1.9780898717952`

    """

    def __init__(self, x, y, axis=0, extrapolate=None):
        x, _, y, axis, _ = prepare_input(x, y, axis)
        if np.iscomplexobj(y):
            msg = ("`PchipInterpolator` only works with real values for `y`. "
                   "Passing an array with a complex dtype for `y` is deprecated "
                   "and will raise an error in SciPy 1.15.0. If you are trying to "
                   "use the real components of the passed array, use `np.real` on "
                   "the array before passing to `PchipInterpolator`.")
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        dk = self._find_derivatives(xp, y)
        super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
        self.axis = axis

    @staticmethod
    def _edge_case(h0, h1, m0, m1):
        # one-sided three-point estimate for the derivative
        d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)

        # try to preserve shape
        mask = np.sign(d) != np.sign(m0)
        mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
        mmm = (~mask) & mask2

        d[mask] = 0.
        d[mmm] = 3.*m0[mmm]

        return d

    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives d_k at the points x_k using the PCHIP
        # algorithm:
        # Let m_k be the slope of the kth segment (between k and k+1).
        # If m_k = 0 or m_{k-1} = 0 or sgn(m_k) != sgn(m_{k-1}), then d_k == 0;
        # else use the weighted harmonic mean
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
        # where h_k is the spacing between x_k and x_{k+1}.
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:, None]
            y = y[:, None]

        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk

        if y.shape[0] == 2:
            # edge case: only have two points, use linear interpolation
            dk = np.zeros_like(y)
            dk[0] = mk
            dk[1] = mk
            return dk.reshape(y_shape)

        smk = np.sign(mk)
        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)

        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]

        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore', invalid='ignore'):
            whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)

        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0 / whmean[~condition]

        # special case endpoints, as suggested in
        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])

        return dk.reshape(y_shape)

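# Illustrative sketch (not part of the original module): because the PCHIP
# slopes above are zeroed wherever adjacent secant slopes disagree in sign or
# vanish, the interpolant cannot overshoot monotone data. A doctest-style
# check (assumes SciPy is installed):
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import PchipInterpolator
#     >>> x = np.arange(6.0)
#     >>> y = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])   # monotone, step-like data
#     >>> xs = np.linspace(0.0, 5.0, 201)
#     >>> p = PchipInterpolator(x, y)
#     >>> bool(np.all(np.diff(p(xs)) >= -1e-12))          # never decreases
#     True
#     >>> bool(p(xs).max() <= 1.0 + 1e-12)                # never overshoots
#     True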
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for pchip interpolation.

    xi and yi are arrays of values used to approximate some function f,
    with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
    to find the value of new points x and the derivatives there.

    See `scipy.interpolate.PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.

        .. deprecated:: 1.13.0
            Complex data is deprecated and will raise an error in
            SciPy 1.15.0. If you are trying to use the real components of
            the passed array, use ``np.real`` on `yi`.

    x : scalar or array_like
        Of length M.
    der : int or list, optional
        Derivatives to extract. The 0th derivative can be included to
        return the function value.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R.

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.

    Examples
    --------
    We can interpolate 2D observed data using pchip interpolation:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import pchip_interpolate
    >>> x_observed = np.linspace(0.0, 10.0, 11)
    >>> y_observed = np.sin(x_observed)
    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
    >>> y = pchip_interpolate(x_observed, y_observed, x)
    >>> plt.plot(x_observed, y_observed, "o", label="observation")
    >>> plt.plot(x, y, label="pchip interpolation")
    >>> plt.legend()
    >>> plt.show()

    """
    P = PchipInterpolator(xi, yi, axis=axis)

    if der == 0:
        return P(x)
    elif _isscalar(der):
        return P.derivative(der)(x)
    else:
        return [P.derivative(nu)(x) for nu in der]

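# Illustrative sketch (not part of the original module): passing a list for
# `der` returns several derivative orders at once, mirroring the branches
# above (assumes SciPy is installed):
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import pchip_interpolate
#     >>> xi = np.linspace(0.0, 10.0, 11)
#     >>> yi = np.sin(xi)
#     >>> vals, slopes = pchip_interpolate(xi, yi, [2.5, 7.5], der=[0, 1])
#     >>> vals.shape, slopes.shape
#     ((2,), (2,))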
class Akima1DInterpolator(CubicHermiteSpline):
|
||||
r"""
|
||||
Akima interpolator
|
||||
|
||||
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
|
||||
method by Akima uses a continuously differentiable sub-spline built from
|
||||
piecewise cubic polynomials. The resultant curve passes through the given
|
||||
data points and will appear smooth and natural.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : ndarray, shape (npoints, )
|
||||
1-D array of monotonically increasing real values.
|
||||
y : ndarray, shape (..., npoints, ...)
|
||||
N-D array of real values. The length of ``y`` along the interpolation axis
|
||||
must be equal to the length of ``x``. Use the ``axis`` parameter to
|
||||
select the interpolation axis.
|
||||
|
||||
.. deprecated:: 1.13.0
|
||||
Complex data is deprecated and will raise an error in SciPy 1.15.0.
|
||||
If you are trying to use the real components of the passed array,
|
||||
use ``np.real`` on ``y``.
|
||||
|
||||
axis : int, optional
|
||||
Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
|
||||
to ``axis=0``.
|
||||
method : {'akima', 'makima'}, optional
|
||||
If ``"makima"``, use the modified Akima interpolation [2]_.
|
||||
Defaults to ``"akima"``, use the Akima interpolation [1]_.
|
||||
|
||||
.. versionadded:: 1.13.0
|
||||
|
||||
extrapolate : {bool, None}, optional
|
||||
If bool, determines whether to extrapolate to out-of-bounds points
|
||||
based on first and last intervals, or to return NaNs. If None,
|
||||
``extrapolate`` is set to False.
|
||||
|
||||
Methods
|
||||
-------
|
||||
__call__
|
||||
derivative
|
||||
antiderivative
|
||||
roots
|
||||
|
||||
See Also
|
||||
--------
|
||||
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
|
||||
CubicSpline : Cubic spline data interpolator.
|
||||
PPoly : Piecewise polynomial in terms of coefficients and breakpoints
|
||||
|
||||
Notes
|
||||
-----
|
||||
.. versionadded:: 0.14
|
||||
|
||||
Use only for precise data, as the fitted curve passes through the given
|
||||
points exactly. This routine is useful for plotting a pleasingly smooth
|
||||
curve through a few given points for purposes of plotting.
|
||||
|
||||
Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
|
||||
the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
|
||||
:math:`x_i` is defined as:
|
||||
|
||||
.. math::
|
||||
|
||||
d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i
|
||||
|
||||
In the Akima interpolation [1]_ (``method="akima"``), the weights are:
|
||||
|
||||
.. math::
|
||||
|
||||
\begin{aligned}
|
||||
w_1 &= |\delta_{i+1} - \delta_i| \\
|
||||
w_2 &= |\delta_{i-1} - \delta_{i-2}|
|
||||
\end{aligned}
|
||||
|
||||
In the modified Akima interpolation [2]_ (``method="makima"``),
|
||||
to eliminate overshoot and avoid edge cases of both numerator and
|
||||
denominator being equal to 0, the weights are modified as follows:
|
||||
|
||||
.. math::
|
||||
|
||||
\begin{align*}
|
||||
w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
|
||||
w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
|
||||
\end{align*}
|
||||
|
||||
Examples
|
||||
--------
|
||||
Comparison of ``method="akima"`` and ``method="makima"``:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> from scipy.interpolate import Akima1DInterpolator
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> x = np.linspace(1, 7, 7)
|
||||
>>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
|
||||
>>> xs = np.linspace(min(x), max(x), num=100)
|
||||
>>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
|
||||
>>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)
|
||||
|
||||
>>> fig, ax = plt.subplots()
|
||||
>>> ax.plot(x, y, "o", label="data")
|
||||
>>> ax.plot(xs, y_akima, label="akima")
|
||||
>>> ax.plot(xs, y_makima, label="makima")
|
||||
>>> ax.legend()
|
||||
>>> fig.show()
|
||||
|
||||
The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] A new method of interpolation and smooth curve fitting based
|
||||
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
|
||||
589-602. :doi:`10.1145/321607.321609`
|
||||
.. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
|
||||
https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"]="akima",
|
||||
extrapolate:bool | None = None):
|
||||
if method not in {"akima", "makima"}:
|
||||
raise NotImplementedError(f"`method`={method} is unsupported.")
|
||||
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
|
||||
# https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
|
||||
x, dx, y, axis, _ = prepare_input(x, y, axis)
|
||||
|
||||
if np.iscomplexobj(y):
|
||||
msg = ("`Akima1DInterpolator` only works with real values for `y`. "
|
||||
"Passing an array with a complex dtype for `y` is deprecated "
|
||||
"and will raise an error in SciPy 1.15.0. If you are trying to "
|
||||
"use the real components of the passed array, use `np.real` on "
|
||||
"the array before passing to `Akima1DInterpolator`.")
|
||||
warnings.warn(msg, DeprecationWarning, stacklevel=2)
|
||||
|
||||
# Akima extrapolation historically False; parent class defaults to True.
|
||||
extrapolate = False if extrapolate is None else extrapolate
|
||||
|
||||
# determine slopes between breakpoints
|
||||
m = np.empty((x.size + 3, ) + y.shape[1:])
|
||||
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
|
||||
m[2:-2] = np.diff(y, axis=0) / dx
|
||||
|
||||
# add two additional points on the left ...
|
||||
m[1] = 2. * m[2] - m[3]
|
||||
m[0] = 2. * m[1] - m[2]
|
||||
# ... and on the right
|
||||
m[-2] = 2. * m[-3] - m[-4]
|
||||
m[-1] = 2. * m[-2] - m[-3]
|
||||
|
||||
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not
|
||||
# defined. This is the fill value:
|
||||
t = .5 * (m[3:] + m[:-3])
|
||||
# get the denominator of the slope t
|
||||
dm = np.abs(np.diff(m, axis=0))
|
||||
if method == "makima":
|
||||
pm = np.abs(m[1:] + m[:-1])
|
||||
f1 = dm[2:] + 0.5 * pm[2:]
|
||||
f2 = dm[:-2] + 0.5 * pm[:-2]
|
||||
else:
|
||||
f1 = dm[2:]
|
||||
f2 = dm[:-2]
|
||||
f12 = f1 + f2
|
||||
# These are the mask of where the slope at breakpoint is defined:
|
||||
ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
|
||||
x_ind, y_ind = ind[0], ind[1:]
|
||||
# Set the slope at breakpoint
|
||||
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
|
||||
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
|
||||
|
||||
super().__init__(x, y, t, axis=0, extrapolate=extrapolate)
|
||||
self.axis = axis
|
||||
|
||||
def extend(self, c, x, right=True):
|
||||
raise NotImplementedError("Extending a 1-D Akima interpolator is not "
|
||||
"yet implemented")
|
||||
|
||||
# These are inherited from PPoly, but they do not produce an Akima
|
||||
# interpolator. Hence stub them out.
|
||||
@classmethod
|
||||
def from_spline(cls, tck, extrapolate=None):
|
||||
raise NotImplementedError("This method does not make sense for "
|
||||
"an Akima interpolator.")
|
||||
|
||||
@classmethod
|
||||
def from_bernstein_basis(cls, bp, extrapolate=None):
|
||||
raise NotImplementedError("This method does not make sense for "
|
||||
"an Akima interpolator.")
|
||||
|
||||
|
||||
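# Illustrative sketch (not part of the original module): unlike the other
# CubicHermiteSpline subclasses above, Akima1DInterpolator defaults to
# extrapolate=False, so out-of-range queries return NaN, while the curve still
# passes through the data exactly (assumes SciPy is installed):
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import Akima1DInterpolator
#     >>> x = np.linspace(1.0, 7.0, 7)
#     >>> y = np.array([-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0])
#     >>> ak = Akima1DInterpolator(x, y)
#     >>> bool(np.isnan(ak(8.0)))              # outside the data range
#     True
#     >>> bool(abs(ak(4.0)) < 1e-12)           # reproduces the data point y(4) = 0
#     True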
class CubicSpline(CubicHermiteSpline):
|
||||
"""Cubic spline data interpolator.
|
||||
|
||||
Interpolate data with a piecewise cubic polynomial which is twice
|
||||
continuously differentiable [1]_. The result is represented as a `PPoly`
|
||||
instance with breakpoints matching the given data.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like, shape (n,)
|
||||
1-D array containing values of the independent variable.
|
||||
Values must be real, finite and in strictly increasing order.
|
||||
y : array_like
|
||||
Array containing values of the dependent variable. It can have
|
||||
arbitrary number of dimensions, but the length along ``axis``
|
||||
(see below) must match the length of ``x``. Values must be finite.
|
||||
axis : int, optional
|
||||
Axis along which `y` is assumed to be varying. Meaning that for
|
||||
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
|
||||
Default is 0.
|
||||
bc_type : string or 2-tuple, optional
|
||||
Boundary condition type. Two additional equations, given by the
|
||||
boundary conditions, are required to determine all coefficients of
|
||||
polynomials on each segment [2]_.
|
||||
|
||||
If `bc_type` is a string, then the specified condition will be applied
|
||||
at both ends of a spline. Available conditions are:
|
||||
|
||||
* 'not-a-knot' (default): The first and second segment at a curve end
|
||||
are the same polynomial. It is a good default when there is no
|
||||
information on boundary conditions.
|
||||
* 'periodic': The interpolated function is assumed to be periodic
|
||||
of period ``x[-1] - x[0]``. The first and last value of `y` must be
|
||||
identical: ``y[0] == y[-1]``. This boundary condition will result in
|
||||
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
|
||||
* 'clamped': The first derivative at the curve ends is zero. Assuming
|
||||
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
|
||||
* 'natural': The second derivative at the curve ends is zero. Assuming
|
||||
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
|
||||
|
||||
If `bc_type` is a 2-tuple, the first and the second value will be
|
||||
applied at the curve start and end respectively. The tuple values can
|
||||
be one of the previously mentioned strings (except 'periodic') or a
|
||||
tuple `(order, deriv_values)` allowing to specify arbitrary
|
||||
derivatives at curve ends:
|
||||
|
||||
* `order`: the derivative order, 1 or 2.
|
||||
* `deriv_value`: array_like containing derivative values, shape must
|
||||
be the same as `y`, excluding ``axis`` dimension. For example, if
|
||||
`y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
|
||||
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
|
||||
and have the shape (n0, n1).
|
||||
extrapolate : {bool, 'periodic', None}, optional
|
||||
If bool, determines whether to extrapolate to out-of-bounds points
|
||||
based on first and last intervals, or to return NaNs. If 'periodic',
|
||||
periodic extrapolation is used. If None (default), ``extrapolate`` is
|
||||
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
x : ndarray, shape (n,)
|
||||
Breakpoints. The same ``x`` which was passed to the constructor.
|
||||
c : ndarray, shape (4, n-1, ...)
|
||||
Coefficients of the polynomials on each segment. The trailing
|
||||
dimensions match the dimensions of `y`, excluding ``axis``.
|
||||
For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
|
||||
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
|
||||
axis : int
|
||||
Interpolation axis. The same axis which was passed to the
|
||||
constructor.
|
||||
|
||||
Methods
|
||||
-------
|
||||
__call__
|
||||
derivative
|
||||
antiderivative
|
||||
integrate
|
||||
roots
|
||||
|
||||
See Also
|
||||
--------
|
||||
Akima1DInterpolator : Akima 1D interpolator.
|
||||
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
|
||||
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
|
||||
former controls only construction of a spline, and the latter only
|
||||
evaluation.
|
||||
|
||||
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
|
||||
a condition that the first derivative is equal to the linear interpolant
|
||||
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
|
||||
solution is sought as a parabola passing through given points.
|
||||
|
||||
When the 'not-a-knot' boundary condition is applied to both ends, the
|
||||
resulting spline will be the same as returned by `splrep` (with ``s=0``)
|
||||
and `InterpolatedUnivariateSpline`, but these two methods use a
|
||||
representation in B-spline basis.
|
||||
|
||||
.. versionadded:: 0.18.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
In this example the cubic spline is used to interpolate a sampled sinusoid.
|
||||
You can see that the spline continuity property holds for the first and
|
||||
second derivatives, and is violated only for the third derivative.
|
||||
|
||||
>>> import numpy as np
|
||||
>>> from scipy.interpolate import CubicSpline
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> x = np.arange(10)
|
||||
>>> y = np.sin(x)
|
||||
>>> cs = CubicSpline(x, y)
|
||||
>>> xs = np.arange(-0.5, 9.6, 0.1)
|
||||
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
|
||||
>>> ax.plot(x, y, 'o', label='data')
|
||||
>>> ax.plot(xs, np.sin(xs), label='true')
|
||||
>>> ax.plot(xs, cs(xs), label="S")
|
||||
>>> ax.plot(xs, cs(xs, 1), label="S'")
|
||||
>>> ax.plot(xs, cs(xs, 2), label="S''")
|
||||
>>> ax.plot(xs, cs(xs, 3), label="S'''")
|
||||
>>> ax.set_xlim(-0.5, 9.5)
|
||||
>>> ax.legend(loc='lower left', ncol=2)
|
||||
>>> plt.show()
|
||||
|
||||
In the second example, the unit circle is interpolated with a spline. A
|
||||
periodic boundary condition is used. You can see that the first derivative
|
||||
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
|
||||
computed. Note that a circle cannot be exactly represented by a cubic
|
||||
spline. To increase precision, more breakpoints would be required.
|
||||
|
||||
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
|
||||
>>> y = np.c_[np.cos(theta), np.sin(theta)]
|
||||
>>> cs = CubicSpline(theta, y, bc_type='periodic')
|
||||
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
|
||||
ds/dx=0.0 ds/dy=1.0
|
||||
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
|
||||
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
|
||||
>>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
|
||||
>>> ax.plot(np.cos(xs), np.sin(xs), label='true')
|
||||
>>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
|
||||
>>> ax.axes.set_aspect('equal')
|
||||
>>> ax.legend(loc='center')
|
||||
>>> plt.show()
|
||||
|
||||
The third example is the interpolation of a polynomial y = x**3 on the
|
||||
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
|
||||
To achieve that we need to specify values and first derivatives at
|
||||
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
|
||||
y'(1) = 3.
|
||||
|
||||
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
|
||||
>>> x = np.linspace(0, 1)
|
||||
>>> np.allclose(x**3, cs(x))
|
||||
True
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] `Cubic Spline Interpolation
|
||||
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
|
||||
on Wikiversity.
|
||||
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
|
||||
"""
|
||||
|
||||
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
|
||||
x, dx, y, axis, _ = prepare_input(x, y, axis)
|
||||
n = len(x)
|
||||
|
||||
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
|
||||
|
||||
if extrapolate is None:
|
||||
if bc[0] == 'periodic':
|
||||
extrapolate = 'periodic'
|
||||
else:
|
||||
extrapolate = True
|
||||
|
||||
if y.size == 0:
|
||||
# bail out early for zero-sized arrays
|
||||
s = np.zeros_like(y)
|
||||
else:
|
||||
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
|
||||
slope = np.diff(y, axis=0) / dxr
|
||||
|
||||
# If bc is 'not-a-knot' this change is just a convention.
|
||||
# If bc is 'periodic' then we already checked that y[0] == y[-1],
|
||||
# and the spline is just a constant, we handle this case in the
|
||||
# same way by setting the first derivatives to slope, which is 0.
|
||||
if n == 2:
|
||||
if bc[0] in ['not-a-knot', 'periodic']:
|
||||
bc[0] = (1, slope[0])
|
||||
if bc[1] in ['not-a-knot', 'periodic']:
|
||||
bc[1] = (1, slope[0])
|
||||
|
||||
# This is a special case, when both conditions are 'not-a-knot'
|
||||
# and n == 3. In this case 'not-a-knot' can't be handled regularly
|
||||
# as the both conditions are identical. We handle this case by
|
||||
# constructing a parabola passing through given points.
|
||||
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
|
||||
A = np.zeros((3, 3)) # This is a standard matrix.
|
||||
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
|
||||
|
||||
A[0, 0] = 1
|
||||
A[0, 1] = 1
|
||||
A[1, 0] = dx[1]
|
||||
A[1, 1] = 2 * (dx[0] + dx[1])
|
||||
A[1, 2] = dx[0]
|
||||
A[2, 1] = 1
|
||||
A[2, 2] = 1
|
||||
|
||||
b[0] = 2 * slope[0]
|
||||
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
|
||||
b[2] = 2 * slope[1]
|
||||
|
||||
s = solve(A, b, overwrite_a=True, overwrite_b=True,
|
||||
check_finite=False)
|
||||
elif n == 3 and bc[0] == 'periodic':
|
||||
# In case when number of points is 3 we compute the derivatives
|
||||
# manually
|
||||
t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
|
||||
s = np.broadcast_to(t, (n,) + y.shape[1:])
|
||||
else:
|
||||
# Find derivative values at each x[i] by solving a tridiagonal
|
||||
# system.
|
||||
A = np.zeros((3, n)) # This is a banded matrix representation.
|
||||
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
|
||||
|
||||
# Filling the system for i=1..n-2
|
||||
# (x[i-1] - x[i]) * s[i-1] +\
|
||||
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
|
||||
# (x[i] - x[i-1]) * s[i+1] =\
|
||||
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
|
||||
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
|
||||
|
||||
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
|
||||
A[0, 2:] = dx[:-1] # The upper diagonal
|
||||
A[-1, :-2] = dx[1:] # The lower diagonal
|
||||
|
||||
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
|
||||
|
||||
bc_start, bc_end = bc
|
||||
|
||||
if bc_start == 'periodic':
|
||||
# Due to the periodicity, and because y[-1] = y[0], the
|
||||
# linear system has (n-1) unknowns/equations instead of n:
|
||||
A = A[:, 0:-1]
|
||||
A[1, 0] = 2 * (dx[-1] + dx[0])
|
||||
A[0, 1] = dx[-1]
|
||||
|
||||
b = b[:-1]
|
||||
|
||||
# Also, due to the periodicity, the system is not tri-diagonal.
|
||||
# We need to compute a "condensed" matrix of shape (n-2, n-2).
|
||||
# See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
|
||||
# for more explanations.
|
||||
# The condensed matrix is obtained by removing the last column
|
||||
# and last row of the (n-1, n-1) system matrix. The removed
|
||||
# values are saved in scalar variables with the (n-1, n-1)
|
||||
# system matrix indices forming their names:
|
||||
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
|
||||
a_m1_m2 = dx[-1]
|
||||
a_m1_m1 = 2 * (dx[-1] + dx[-2])
|
||||
a_m2_m1 = dx[-3]
|
||||
a_0_m1 = dx[0]
|
||||
|
||||
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
|
||||
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
|
||||
|
||||
Ac = A[:, :-1]
|
||||
b1 = b[:-1]
|
||||
b2 = np.zeros_like(b1)
|
||||
b2[0] = -a_0_m1
|
||||
b2[-1] = -a_m2_m1
|
||||
|
||||
# s1 and s2 are the solutions of (n-2, n-2) system
|
||||
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
|
||||
overwrite_b=False, check_finite=False)
|
||||
|
||||
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
|
||||
overwrite_b=False, check_finite=False)
|
||||
|
||||
# computing the s[n-2] solution:
|
||||
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
|
||||
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
|
||||
|
||||
# s is the solution of the (n, n) system:
|
||||
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
|
||||
s[:-2] = s1 + s_m1 * s2
|
||||
s[-2] = s_m1
|
||||
s[-1] = s[0]
|
||||
else:
|
||||
if bc_start == 'not-a-knot':
|
||||
A[1, 0] = dx[1]
|
||||
A[0, 1] = x[2] - x[0]
|
||||
d = x[2] - x[0]
|
||||
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
|
||||
dxr[0]**2 * slope[1]) / d
|
||||
elif bc_start[0] == 1:
|
||||
A[1, 0] = 1
|
||||
A[0, 1] = 0
|
||||
b[0] = bc_start[1]
|
||||
elif bc_start[0] == 2:
|
||||
A[1, 0] = 2 * dx[0]
|
||||
A[0, 1] = dx[0]
|
||||
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
|
||||
|
||||
if bc_end == 'not-a-knot':
|
||||
A[1, -1] = dx[-2]
|
||||
A[-1, -2] = x[-1] - x[-3]
|
||||
d = x[-1] - x[-3]
|
||||
b[-1] = ((dxr[-1]**2*slope[-2] +
|
||||
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
|
||||
elif bc_end[0] == 1:
|
||||
A[1, -1] = 1
|
||||
A[-1, -2] = 0
|
||||
b[-1] = bc_end[1]
|
||||
elif bc_end[0] == 2:
|
||||
A[1, -1] = 2 * dx[-1]
|
||||
A[-1, -2] = dx[-1]
|
||||
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
|
||||
|
||||
s = solve_banded((1, 1), A, b, overwrite_ab=True,
|
||||
overwrite_b=True, check_finite=False)
|
||||
|
||||
super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
|
||||
self.axis = axis
|
||||
|
||||
@staticmethod
|
||||
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
|
||||
"""Validate and prepare boundary conditions.
|
||||
|
||||
Returns
|
||||
-------
|
||||
validated_bc : 2-tuple
|
||||
Boundary conditions for a curve start and end.
|
||||
y : ndarray
|
||||
y casted to complex dtype if one of the boundary conditions has
|
||||
complex dtype.
|
||||
"""
|
||||
if isinstance(bc_type, str):
|
||||
if bc_type == 'periodic':
|
||||
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
|
||||
raise ValueError(
|
||||
f"The first and last `y` point along axis {axis} must "
|
||||
"be identical (within machine precision) when "
|
||||
"bc_type='periodic'.")
|
||||
|
||||
bc_type = (bc_type, bc_type)
|
||||
|
||||
else:
|
||||
if len(bc_type) != 2:
|
||||
raise ValueError("`bc_type` must contain 2 elements to "
|
||||
"specify start and end conditions.")
|
||||
|
||||
if 'periodic' in bc_type:
|
||||
raise ValueError("'periodic' `bc_type` is defined for both "
|
||||
"curve ends and cannot be used with other "
|
||||
"boundary conditions.")
|
||||
|
||||
validated_bc = []
|
||||
for bc in bc_type:
|
||||
if isinstance(bc, str):
|
||||
if bc == 'clamped':
|
||||
validated_bc.append((1, np.zeros(expected_deriv_shape)))
|
||||
elif bc == 'natural':
|
||||
validated_bc.append((2, np.zeros(expected_deriv_shape)))
|
||||
elif bc in ['not-a-knot', 'periodic']:
|
||||
validated_bc.append(bc)
|
||||
else:
|
||||
raise ValueError(f"bc_type={bc} is not allowed.")
|
||||
else:
|
||||
try:
|
||||
deriv_order, deriv_value = bc
|
||||
except Exception as e:
|
||||
raise ValueError(
|
||||
"A specified derivative value must be "
|
||||
"given in the form (order, value)."
|
||||
) from e
|
||||
|
||||
if deriv_order not in [1, 2]:
|
||||
raise ValueError("The specified derivative order must "
|
||||
"be 1 or 2.")
|
||||
|
||||
deriv_value = np.asarray(deriv_value)
|
||||
if deriv_value.shape != expected_deriv_shape:
|
||||
raise ValueError(
|
||||
f"`deriv_value` shape {deriv_value.shape} is not "
|
||||
f"the expected one {expected_deriv_shape}."
|
||||
)
|
||||
|
||||
if np.issubdtype(deriv_value.dtype, np.complexfloating):
|
||||
y = y.astype(complex, copy=False)
|
||||
|
||||
validated_bc.append((deriv_order, deriv_value))
|
||||
|
||||
return validated_bc, y
|
||||
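# Illustrative sketch (not part of the original module): the boundary handling
# in _validate_bc above means a string bc_type is applied at both ends;
# 'natural', for instance, forces the second derivative to zero there
# (assumes SciPy is installed):
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import CubicSpline
#     >>> x = np.linspace(0.0, 3.0, 7)
#     >>> y = np.cos(x)
#     >>> cs = CubicSpline(x, y, bc_type='natural')
#     >>> bool(abs(cs(x[0], 2)) < 1e-10) and bool(abs(cs(x[-1], 2)) < 1e-10)
#     True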
Binary file not shown.
Binary file not shown.

venv/lib/python3.12/site-packages/scipy/interpolate/_fitpack2.py (new file, 2362 lines)
File diff suppressed because it is too large.

@@ -0,0 +1,805 @@
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.

See
https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/

Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.

NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.

TODO: Make interfaces to the following fitpack functions:
    For univariate splines: cocosp, concon, fourco, insert
    For bivariate splines: profil, regrid, parsur, surev
"""

__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
           'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']

import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
                   empty, iinfo, asarray)

# Try to replace _fitpack interface with
# f2py-generated version
from . import _dfitpack as dfitpack


dfitpack_int = dfitpack.types.intvar.dtype


def _int_overflow(x, exception, msg=None):
    """Cast the value to a dfitpack_int and raise an OverflowError if the value
    cannot fit.
    """
    if x > iinfo(dfitpack_int).max:
        if msg is None:
            msg = f'{x!r} cannot fit into a {dfitpack_int!r}'
        raise exception(msg)
    return dfitpack_int.type(x)

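# Illustrative sketch (not part of the original module): the wrappers exported
# in __all__ above work with the FITPACK "tck" representation, a tuple of
# (knots, coefficients, degree). A doctest-style check (assumes SciPy is
# installed):
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import splrep, splev
#     >>> x = np.linspace(0, 10, 20)
#     >>> y = np.sin(x)
#     >>> tck = splrep(x, y, s=0)          # s=0: interpolating spline
#     >>> bool(np.allclose(splev(x, tck), y))
#     True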
_iermess = {
|
||||
0: ["The spline has a residual sum of squares fp such that "
|
||||
"abs(fp-s)/s<=0.001", None],
|
||||
-1: ["The spline is an interpolating spline (fp=0)", None],
|
||||
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
|
||||
"fp gives the upper bound fp0 for the smoothing factor s", None],
|
||||
1: ["The required storage space exceeds the available storage space.\n"
|
||||
"Probable causes: data (x,y) size is too small or smoothing parameter"
|
||||
"\ns is too small (fp>s).", ValueError],
|
||||
2: ["A theoretically impossible result when finding a smoothing spline\n"
|
||||
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
|
||||
ValueError],
|
||||
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
|
||||
"spline with fp=s has been reached. Probable cause: s too small.\n"
|
||||
"(abs(fp-s)/s>0.001)", ValueError],
|
||||
10: ["Error on input data", ValueError],
|
||||
'unknown': ["An error occurred", TypeError]
|
||||
}
|
||||
|
||||
_iermess2 = {
|
||||
0: ["The spline has a residual sum of squares fp such that "
|
||||
"abs(fp-s)/s<=0.001", None],
|
||||
-1: ["The spline is an interpolating spline (fp=0)", None],
|
||||
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
|
||||
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
|
||||
-3: ["Warning. The coefficients of the spline have been computed as the\n"
|
||||
"minimal norm least-squares solution of a rank deficient system.",
|
||||
None],
|
||||
1: ["The required storage space exceeds the available storage space.\n"
|
||||
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
|
||||
ValueError],
|
||||
2: ["A theoretically impossible result when finding a smoothing spline\n"
|
||||
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
|
||||
"(abs(fp-s)/s>0.001)", ValueError],
|
||||
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
|
||||
"spline with fp=s has been reached. Probable cause: s too small.\n"
|
||||
"(abs(fp-s)/s>0.001)", ValueError],
|
||||
4: ["No more knots can be added because the number of B-spline\n"
|
||||
"coefficients already exceeds the number of data points m.\n"
|
||||
"Probable causes: either s or m too small. (fp>s)", ValueError],
|
||||
5: ["No more knots can be added because the additional knot would\n"
|
||||
"coincide with an old one. Probable cause: s too small or too large\n"
|
||||
"a weight to an inaccurate data point. (fp>s)", ValueError],
|
||||
10: ["Error on input data", ValueError],
|
||||
11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
|
||||
"the minimal least-squares solution of a rank deficient system of\n"
|
||||
"linear equations.", ValueError],
|
||||
'unknown': ["An error occurred", TypeError]
|
||||
}
|
||||
|
||||
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
|
||||
'iwrk': array([], dfitpack_int), 'u': array([], float),
|
||||
'ub': 0, 'ue': 1}
|
||||
|
||||
|
||||
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
|
||||
full_output=0, nest=None, per=0, quiet=1):
|
||||
# see the docstring of `_fitpack_py/splprep`
|
||||
if task <= 0:
|
||||
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
|
||||
'iwrk': array([], dfitpack_int), 'u': array([], float),
|
||||
'ub': 0, 'ue': 1}
|
||||
x = atleast_1d(x)
|
||||
idim, m = x.shape
|
||||
if per:
|
||||
for i in range(idim):
|
||||
if x[i][0] != x[i][-1]:
|
||||
if not quiet:
|
||||
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
|
||||
(i, m, i)),
|
||||
stacklevel=2)
|
||||
x[i][-1] = x[i][0]
|
||||
if not 0 < idim < 11:
|
||||
raise TypeError('0 < idim < 11 must hold')
|
||||
if w is None:
|
||||
w = ones(m, float)
|
||||
else:
|
||||
w = atleast_1d(w)
|
||||
ipar = (u is not None)
|
||||
if ipar:
|
||||
_parcur_cache['u'] = u
|
||||
if ub is None:
|
||||
_parcur_cache['ub'] = u[0]
|
||||
else:
|
||||
_parcur_cache['ub'] = ub
|
||||
if ue is None:
|
||||
_parcur_cache['ue'] = u[-1]
|
||||
else:
|
||||
_parcur_cache['ue'] = ue
|
||||
else:
|
||||
_parcur_cache['u'] = zeros(m, float)
|
||||
if not (1 <= k <= 5):
|
||||
raise TypeError('1 <= k= %d <=5 must hold' % k)
|
||||
if not (-1 <= task <= 1):
|
||||
raise TypeError('task must be -1, 0 or 1')
|
||||
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
|
||||
raise TypeError('Mismatch of input dimensions')
|
||||
if s is None:
|
||||
s = m - sqrt(2*m)
|
||||
if t is None and task == -1:
|
||||
raise TypeError('Knots must be given for task=-1')
|
||||
if t is not None:
|
||||
_parcur_cache['t'] = atleast_1d(t)
|
||||
n = len(_parcur_cache['t'])
|
||||
if task == -1 and n < 2*k + 2:
|
||||
raise TypeError('There must be at least 2*k+2 knots for task=-1')
|
||||
if m <= k:
|
||||
raise TypeError('m > k must hold')
|
||||
if nest is None:
|
||||
nest = m + 2*k
|
||||
|
||||
if (task >= 0 and s == 0) or (nest < 0):
|
||||
if per:
|
||||
nest = m + 2*k
|
||||
else:
|
||||
nest = m + k + 1
|
||||
nest = max(nest, 2*k + 3)
|
||||
u = _parcur_cache['u']
|
||||
ub = _parcur_cache['ub']
|
||||
ue = _parcur_cache['ue']
|
||||
t = _parcur_cache['t']
|
||||
wrk = _parcur_cache['wrk']
|
||||
iwrk = _parcur_cache['iwrk']
|
||||
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
|
||||
task, ipar, s, t, nest, wrk, iwrk, per)
|
||||
_parcur_cache['u'] = o['u']
|
||||
_parcur_cache['ub'] = o['ub']
|
||||
_parcur_cache['ue'] = o['ue']
|
||||
_parcur_cache['t'] = t
|
||||
_parcur_cache['wrk'] = o['wrk']
|
||||
_parcur_cache['iwrk'] = o['iwrk']
|
||||
ier = o['ier']
|
||||
fp = o['fp']
|
||||
n = len(t)
|
||||
u = o['u']
|
||||
c.shape = idim, n - k - 1
|
||||
tcku = [t, list(c), k], u
|
||||
if ier <= 0 and not quiet:
|
||||
warnings.warn(RuntimeWarning(_iermess[ier][0] +
|
||||
"\tk=%d n=%d m=%d fp=%f s=%f" %
|
||||
(k, len(t), m, fp, s)),
|
||||
stacklevel=2)
|
||||
if ier > 0 and not full_output:
|
||||
if ier in [1, 2, 3]:
|
||||
warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
|
||||
else:
|
||||
try:
|
||||
raise _iermess[ier][1](_iermess[ier][0])
|
||||
except KeyError as e:
|
||||
raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
|
||||
if full_output:
|
||||
try:
|
||||
return tcku, fp, ier, _iermess[ier][0]
|
||||
except KeyError:
|
||||
return tcku, fp, ier, _iermess['unknown'][0]
|
||||
else:
|
||||
return tcku
|
||||
|
||||
|
||||
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
|
||||
'iwrk': array([], dfitpack_int)}
|
||||
|
||||
|
||||
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
|
||||
full_output=0, per=0, quiet=1):
|
||||
# see the docstring of `_fitpack_py/splrep`
|
||||
if task <= 0:
|
||||
_curfit_cache = {}
|
||||
x, y = map(atleast_1d, [x, y])
|
||||
m = len(x)
|
||||
if w is None:
|
||||
w = ones(m, float)
|
||||
if s is None:
|
||||
s = 0.0
|
||||
else:
|
||||
w = atleast_1d(w)
|
||||
if s is None:
|
||||
s = m - sqrt(2*m)
|
||||
if not len(w) == m:
|
||||
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
|
||||
if (m != len(y)) or (m != len(w)):
|
||||
raise TypeError('Lengths of the first three arguments (x,y,w) must '
|
||||
'be equal')
|
||||
if not (1 <= k <= 5):
|
||||
raise TypeError('Given degree of the spline (k=%d) is not supported. '
|
||||
'(1<=k<=5)' % k)
|
||||
if m <= k:
|
||||
raise TypeError('m > k must hold')
|
||||
if xb is None:
|
||||
xb = x[0]
|
||||
if xe is None:
|
||||
xe = x[-1]
|
||||
if not (-1 <= task <= 1):
|
||||
raise TypeError('task must be -1, 0 or 1')
|
||||
if t is not None:
|
||||
task = -1
|
||||
if task == -1:
|
||||
if t is None:
|
||||
raise TypeError('Knots must be given for task=-1')
|
||||
numknots = len(t)
|
||||
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
|
||||
_curfit_cache['t'][k+1:-k-1] = t
|
||||
nest = len(_curfit_cache['t'])
|
||||
elif task == 0:
|
||||
if per:
|
||||
nest = max(m + 2*k, 2*k + 3)
|
||||
else:
|
||||
nest = max(m + k + 1, 2*k + 3)
|
||||
t = empty((nest,), float)
|
||||
_curfit_cache['t'] = t
|
||||
if task <= 0:
|
||||
if per:
|
||||
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
|
||||
else:
|
||||
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
|
||||
_curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
|
||||
try:
|
||||
t = _curfit_cache['t']
|
||||
wrk = _curfit_cache['wrk']
|
||||
iwrk = _curfit_cache['iwrk']
|
||||
except KeyError as e:
|
||||
raise TypeError("must call with task=1 only after"
|
||||
" call with task=0,-1") from e
|
||||
if not per:
|
||||
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
|
||||
xb, xe, k, s)
|
||||
else:
|
||||
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
|
||||
tck = (t[:n], c[:n], k)
|
||||
if ier <= 0 and not quiet:
|
||||
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
|
||||
(k, len(t), m, fp, s))
|
||||
warnings.warn(RuntimeWarning(_mess), stacklevel=2)
|
||||
if ier > 0 and not full_output:
|
||||
if ier in [1, 2, 3]:
|
||||
warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
|
||||
else:
|
||||
try:
|
||||
raise _iermess[ier][1](_iermess[ier][0])
|
||||
except KeyError as e:
|
||||
raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
|
||||
if full_output:
|
||||
try:
|
||||
return tck, fp, ier, _iermess[ier][0]
|
||||
except KeyError:
|
||||
return tck, fp, ier, _iermess['unknown'][0]
|
||||
else:
|
||||
return tck
|
||||
|
||||
|
||||
def splev(x, tck, der=0, ext=0):
|
||||
# see the docstring of `_fitpack_py/splev`
|
||||
t, c, k = tck
|
||||
try:
|
||||
c[0][0]
|
||||
parametric = True
|
||||
except Exception:
|
||||
parametric = False
|
||||
if parametric:
|
||||
return list(map(lambda c, x=x, t=t, k=k, der=der:
|
||||
splev(x, [t, c, k], der, ext), c))
|
||||
else:
|
||||
if not (0 <= der <= k):
|
||||
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
|
||||
if ext not in (0, 1, 2, 3):
|
||||
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
|
||||
|
||||
x = asarray(x)
|
||||
shape = x.shape
|
||||
x = atleast_1d(x).ravel()
|
||||
if der == 0:
|
||||
y, ier = dfitpack.splev(t, c, k, x, ext)
|
||||
else:
|
||||
y, ier = dfitpack.splder(t, c, k, x, der, ext)
|
||||
|
||||
if ier == 10:
|
||||
raise ValueError("Invalid input data")
|
||||
if ier == 1:
|
||||
raise ValueError("Found x value not in the domain")
|
||||
if ier:
|
||||
raise TypeError("An error occurred")
|
||||
|
||||
return y.reshape(shape)
|
||||
|
||||
|
||||
def splint(a, b, tck, full_output=0):
|
||||
# see the docstring of `_fitpack_py/splint`
|
||||
t, c, k = tck
|
||||
try:
|
||||
c[0][0]
|
||||
parametric = True
|
||||
except Exception:
|
||||
parametric = False
|
||||
if parametric:
|
||||
return list(map(lambda c, a=a, b=b, t=t, k=k:
|
||||
splint(a, b, [t, c, k]), c))
|
||||
else:
|
||||
aint, wrk = dfitpack.splint(t, c, k, a, b)
|
||||
if full_output:
|
||||
return aint, wrk
|
||||
else:
|
||||
return aint
|
||||
|
||||
|
||||
def sproot(tck, mest=10):
|
||||
# see the docstring of `_fitpack_py/sproot`
|
||||
t, c, k = tck
|
||||
if k != 3:
|
||||
raise ValueError("sproot works only for cubic (k=3) splines")
|
||||
try:
|
||||
c[0][0]
|
||||
parametric = True
|
||||
except Exception:
|
||||
parametric = False
|
||||
if parametric:
|
||||
return list(map(lambda c, t=t, k=k, mest=mest:
|
||||
sproot([t, c, k], mest), c))
|
||||
else:
|
||||
if len(t) < 8:
|
||||
raise TypeError("The number of knots %d>=8" % len(t))
|
||||
z, m, ier = dfitpack.sproot(t, c, mest)
|
||||
if ier == 10:
|
||||
raise TypeError("Invalid input data. "
|
||||
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
|
||||
if ier == 0:
|
||||
return z[:m]
|
||||
if ier == 1:
|
||||
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"),
|
||||
stacklevel=2)
|
||||
return z[:m]
|
||||
raise TypeError("Unknown error")
|
||||
|
||||
|
||||
def spalde(x, tck):
|
||||
# see the docstring of `_fitpack_py/spalde`
|
||||
t, c, k = tck
|
||||
try:
|
||||
c[0][0]
|
||||
parametric = True
|
||||
except Exception:
|
||||
parametric = False
|
||||
if parametric:
|
||||
return list(map(lambda c, x=x, t=t, k=k:
|
||||
spalde(x, [t, c, k]), c))
|
||||
else:
|
||||
x = atleast_1d(x)
|
||||
if len(x) > 1:
|
||||
return list(map(lambda x, tck=tck: spalde(x, tck), x))
|
||||
d, ier = dfitpack.spalde(t, c, k+1, x[0])
|
||||
if ier == 0:
|
||||
return d
|
||||
if ier == 10:
|
||||
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
|
||||
raise TypeError("Unknown error")
|
||||
|
||||
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
|
||||
# full_output=0,nest=None,per=0,quiet=1):
|
||||
|
||||
|
||||
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
|
||||
'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
|
||||
|
||||
|
||||
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
|
||||
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
|
||||
full_output=0, nxest=None, nyest=None, quiet=1):
|
||||
"""
|
||||
Find a bivariate B-spline representation of a surface.
|
||||
|
||||
Given a set of data points (x[i], y[i], z[i]) representing a surface
|
||||
z=f(x,y), compute a B-spline representation of the surface. Based on
|
||||
the routine SURFIT from FITPACK.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x, y, z : ndarray
|
||||
Rank-1 arrays of data points.
|
||||
w : ndarray, optional
|
||||
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
|
||||
xb, xe : float, optional
|
||||
End points of approximation interval in `x`.
|
||||
By default ``xb = x.min(), xe=x.max()``.
|
||||
yb, ye : float, optional
|
||||
End points of approximation interval in `y`.
|
||||
By default ``yb=y.min(), ye = y.max()``.
|
||||
kx, ky : int, optional
|
||||
The degrees of the spline (1 <= kx, ky <= 5).
|
||||
Third order (kx=ky=3) is recommended.
|
||||
task : int, optional
|
||||
If task=0, find knots in x and y and coefficients for a given
|
||||
smoothing factor, s.
|
||||
If task=1, find knots and coefficients for another value of the
|
||||
smoothing factor, s. bisplrep must have been previously called
|
||||
with task=0 or task=1.
|
||||
If task=-1, find coefficients for a given set of knots tx, ty.
|
||||
s : float, optional
|
||||
A non-negative smoothing factor. If weights correspond
|
||||
to the inverse of the standard-deviation of the errors in z,
|
||||
then a good s-value should be found in the range
|
||||
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
|
||||
eps : float, optional
|
||||
A threshold for determining the effective rank of an
|
||||
over-determined linear system of equations (0 < eps < 1).
|
||||
`eps` is not likely to need changing.
|
||||
tx, ty : ndarray, optional
|
||||
Rank-1 arrays of the knots of the spline for task=-1
|
||||
full_output : int, optional
|
||||
Non-zero to return optional outputs.
|
||||
nxest, nyest : int, optional
|
||||
Over-estimates of the total number of knots. If None then
|
||||
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
|
||||
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
|
||||
quiet : int, optional
|
||||
Non-zero to suppress printing of messages.
|
||||
|
||||
Returns
|
||||
-------
|
||||
tck : array_like
|
||||
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
|
||||
coefficients (c) of the bivariate B-spline representation of the
|
||||
surface along with the degree of the spline.
|
||||
fp : ndarray
|
||||
The weighted sum of squared residuals of the spline approximation.
|
||||
ier : int
|
||||
An integer flag about splrep success. Success is indicated if
|
||||
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
|
||||
Otherwise an error is raised.
|
||||
msg : str
|
||||
A message corresponding to the integer flag, ier.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splprep, splrep, splint, sproot, splev
|
||||
UnivariateSpline, BivariateSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
See `bisplev` to evaluate the value of the B-spline given its tck
|
||||
representation.
|
||||
|
||||
If the input data is such that input dimensions have incommensurate
|
||||
units and differ by many orders of magnitude, the interpolant may have
|
||||
numerical artifacts. Consider rescaling the data before interpolation.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
|
||||
Ima J. Numer. Anal. 1 (1981) 267-283.
|
||||
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
|
||||
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
|
||||
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
|
||||
Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
|
||||
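
    A minimal sketch with synthetic scattered data (chosen here only for
    illustration):

    >>> import numpy as np
    >>> from scipy.interpolate import bisplrep, bisplev
    >>> rng = np.random.default_rng(1234)
    >>> x = rng.uniform(-1, 1, size=200)
    >>> y = rng.uniform(-1, 1, size=200)
    >>> z = np.cos(np.pi * x) * np.sin(np.pi * y)
    >>> tck = bisplrep(x, y, z, s=0.1)               # smoothed surface fit
    >>> znew = bisplev(np.linspace(-1, 1, 20),
    ...                np.linspace(-1, 1, 20), tck)  # values on a 20x20 grid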
|
||||
"""
|
||||
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
|
||||
m = len(x)
|
||||
if not (m == len(y) == len(z)):
|
||||
raise TypeError('len(x)==len(y)==len(z) must hold.')
|
||||
if w is None:
|
||||
w = ones(m, float)
|
||||
else:
|
||||
w = atleast_1d(w)
|
||||
if not len(w) == m:
|
||||
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
|
||||
if xb is None:
|
||||
xb = x.min()
|
||||
if xe is None:
|
||||
xe = x.max()
|
||||
if yb is None:
|
||||
yb = y.min()
|
||||
if ye is None:
|
||||
ye = y.max()
|
||||
if not (-1 <= task <= 1):
|
||||
raise TypeError('task must be -1, 0 or 1')
|
||||
if s is None:
|
||||
s = m - sqrt(2*m)
|
||||
if tx is None and task == -1:
|
||||
raise TypeError('Knots_x must be given for task=-1')
|
||||
if tx is not None:
|
||||
_surfit_cache['tx'] = atleast_1d(tx)
|
||||
nx = len(_surfit_cache['tx'])
|
||||
if ty is None and task == -1:
|
||||
raise TypeError('Knots_y must be given for task=-1')
|
||||
if ty is not None:
|
||||
_surfit_cache['ty'] = atleast_1d(ty)
|
||||
ny = len(_surfit_cache['ty'])
|
||||
if task == -1 and nx < 2*kx+2:
|
||||
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
|
||||
if task == -1 and ny < 2*ky+2:
|
||||
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
|
||||
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
|
||||
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
|
||||
'supported. (1<=k<=5)' % (kx, ky))
|
||||
if m < (kx + 1)*(ky + 1):
|
||||
raise TypeError('m >= (kx+1)(ky+1) must hold')
|
||||
if nxest is None:
|
||||
nxest = int(kx + sqrt(m/2))
|
||||
if nyest is None:
|
||||
nyest = int(ky + sqrt(m/2))
|
||||
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
|
||||
if task >= 0 and s == 0:
|
||||
nxest = int(kx + sqrt(3*m))
|
||||
nyest = int(ky + sqrt(3*m))
|
||||
if task == -1:
|
||||
_surfit_cache['tx'] = atleast_1d(tx)
|
||||
_surfit_cache['ty'] = atleast_1d(ty)
|
||||
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
|
||||
wrk = _surfit_cache['wrk']
|
||||
u = nxest - kx - 1
|
||||
v = nyest - ky - 1
|
||||
km = max(kx, ky) + 1
|
||||
ne = max(nxest, nyest)
|
||||
bx, by = kx*v + ky + 1, ky*u + kx + 1
|
||||
b1, b2 = bx, bx + v - ky
|
||||
if bx > by:
|
||||
b1, b2 = by, by + u - kx
|
||||
msg = "Too many data points to interpolate"
|
||||
lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
|
||||
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
|
||||
OverflowError,
|
||||
msg=msg)
|
||||
lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, OverflowError, msg=msg)
|
||||
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
|
||||
task, s, eps, tx, ty, nxest, nyest,
|
||||
wrk, lwrk1, lwrk2)
|
||||
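    # Note: the fitted knots and work array are cached in _curfit_cache here,
    # even though the work array passed to _surfit above is read from
    # _surfit_cache['wrk'].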
_curfit_cache['tx'] = tx
|
||||
_curfit_cache['ty'] = ty
|
||||
_curfit_cache['wrk'] = o['wrk']
|
||||
ier, fp = o['ier'], o['fp']
|
||||
tck = [tx, ty, c, kx, ky]
|
||||
|
||||
ierm = min(11, max(-3, ier))
|
||||
if ierm <= 0 and not quiet:
|
||||
_mess = (_iermess2[ierm][0] +
|
||||
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
|
||||
(kx, ky, len(tx), len(ty), m, fp, s))
|
||||
warnings.warn(RuntimeWarning(_mess), stacklevel=2)
|
||||
if ierm > 0 and not full_output:
|
||||
if ier in [1, 2, 3, 4, 5]:
|
||||
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
|
||||
(kx, ky, len(tx), len(ty), m, fp, s))
|
||||
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess), stacklevel=2)
|
||||
else:
|
||||
try:
|
||||
raise _iermess2[ierm][1](_iermess2[ierm][0])
|
||||
except KeyError as e:
|
||||
raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
|
||||
if full_output:
|
||||
try:
|
||||
return tck, fp, ier, _iermess2[ierm][0]
|
||||
except KeyError:
|
||||
return tck, fp, ier, _iermess2['unknown'][0]
|
||||
else:
|
||||
return tck
|
||||
|
||||
|
||||
def bisplev(x, y, tck, dx=0, dy=0):
|
||||
"""
|
||||
Evaluate a bivariate B-spline and its derivatives.
|
||||
|
||||
Return a rank-2 array of spline function values (or spline derivative
|
||||
values) at points given by the cross-product of the rank-1 arrays `x` and
|
||||
`y`. In special cases, return an array or just a float if either `x` or
|
||||
`y` or both are floats. Based on BISPEV and PARDER from FITPACK.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x, y : ndarray
|
||||
Rank-1 arrays specifying the domain over which to evaluate the
|
||||
spline or its derivative.
|
||||
tck : tuple
|
||||
A sequence of length 5 returned by `bisplrep` containing the knot
|
||||
locations, the coefficients, and the degree of the spline:
|
||||
[tx, ty, c, kx, ky].
|
||||
dx, dy : int, optional
|
||||
The orders of the partial derivatives in `x` and `y` respectively.
|
||||
|
||||
Returns
|
||||
-------
|
||||
vals : ndarray
|
||||
The B-spline or its derivative evaluated over the set formed by
|
||||
the cross-product of `x` and `y`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splprep, splrep, splint, sproot, splev
|
||||
UnivariateSpline, BivariateSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
See `bisplrep` to generate the `tck` representation.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Dierckx P. : An algorithm for surface fitting
|
||||
with spline functions
|
||||
Ima J. Numer. Anal. 1 (1981) 267-283.
|
||||
.. [2] Dierckx P. : An algorithm for surface fitting
|
||||
with spline functions
|
||||
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
|
||||
.. [3] Dierckx P. : Curve and surface fitting with splines,
|
||||
Monographs on Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
|
||||
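
    A minimal sketch, reusing a `bisplrep` fit of synthetic data:

    >>> import numpy as np
    >>> from scipy.interpolate import bisplrep, bisplev
    >>> xobs = np.repeat(np.linspace(0., 5., 10), 10)
    >>> yobs = np.tile(np.linspace(0., 5., 10), 10)
    >>> tck = bisplrep(xobs, yobs, np.sin(xobs) + np.cos(yobs))
    >>> xnew, ynew = np.linspace(0., 5., 7), np.linspace(0., 5., 9)
    >>> vals = bisplev(xnew, ynew, tck)          # shape (7, 9)
    >>> dvals = bisplev(xnew, ynew, tck, dx=1)   # first partial derivative in x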
|
||||
"""
|
||||
tx, ty, c, kx, ky = tck
|
||||
if not (0 <= dx < kx):
|
||||
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
|
||||
if not (0 <= dy < ky):
|
||||
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
|
||||
x, y = map(atleast_1d, [x, y])
|
||||
if (len(x.shape) != 1) or (len(y.shape) != 1):
|
||||
raise ValueError("First two entries should be rank-1 arrays.")
|
||||
|
||||
msg = "Too many data points to interpolate."
|
||||
|
||||
_int_overflow(x.size * y.size, MemoryError, msg=msg)
|
||||
|
||||
if dx != 0 or dy != 0:
|
||||
_int_overflow((tx.size - kx - 1)*(ty.size - ky - 1),
|
||||
MemoryError, msg=msg)
|
||||
z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
|
||||
else:
|
||||
z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
|
||||
|
||||
if ier == 10:
|
||||
raise ValueError("Invalid input data")
|
||||
if ier:
|
||||
raise TypeError("An error occurred")
|
||||
z.shape = len(x), len(y)
|
||||
if len(z) > 1:
|
||||
return z
|
||||
if len(z[0]) > 1:
|
||||
return z[0]
|
||||
return z[0][0]
|
||||
|
||||
|
||||
def dblint(xa, xb, ya, yb, tck):
|
||||
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xa, xb : float
|
||||
The end-points of the x integration interval.
|
||||
ya, yb : float
|
||||
The end-points of the y integration interval.
|
||||
tck : list [tx, ty, c, kx, ky]
|
||||
A sequence of length 5 returned by bisplrep containing the knot
|
||||
locations tx, ty, the coefficients c, and the degrees kx, ky
|
||||
of the spline.
|
||||
|
||||
Returns
|
||||
-------
|
||||
integ : float
|
||||
The value of the resulting integral.
|
||||
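
    Examples
    --------
    A minimal sketch with synthetic data (both functions are assumed to be
    importable from `scipy.interpolate`):

    >>> import numpy as np
    >>> from scipy.interpolate import bisplrep, dblint
    >>> rng = np.random.default_rng(5)
    >>> x, y = rng.uniform(0, 1, size=(2, 100))
    >>> tck = bisplrep(x, y, x * y, xb=0, xe=1, yb=0, ye=1, s=1e-8)
    >>> integ = dblint(0, 1, 0, 1, tck)   # should be close to 1/4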
"""
|
||||
tx, ty, c, kx, ky = tck
|
||||
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
|
||||
|
||||
|
||||
def insert(x, tck, m=1, per=0):
|
||||
# see the docstring of `_fitpack_py/insert`
|
||||
t, c, k = tck
|
||||
try:
|
||||
c[0][0]
|
||||
parametric = True
|
||||
except Exception:
|
||||
parametric = False
|
||||
if parametric:
|
||||
cc = []
|
||||
for c_vals in c:
|
||||
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
|
||||
cc.append(cc_val)
|
||||
return (tt, cc, kk)
|
||||
else:
|
||||
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
|
||||
if ier == 10:
|
||||
raise ValueError("Invalid input data")
|
||||
if ier:
|
||||
raise TypeError("An error occurred")
|
||||
return (tt, cc, k)
|
||||
|
||||
|
||||
def splder(tck, n=1):
|
||||
# see the docstring of `_fitpack_py/splder`
|
||||
if n < 0:
|
||||
return splantider(tck, -n)
|
||||
|
||||
t, c, k = tck
|
||||
|
||||
if n > k:
|
||||
raise ValueError(f"Order of derivative (n = {n!r}) must be <= "
|
||||
f"order of spline (k = {tck[2]!r})")
|
||||
|
||||
# Extra axes for the trailing dims of the `c` array:
|
||||
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
|
||||
|
||||
with np.errstate(invalid='raise', divide='raise'):
|
||||
try:
|
||||
for j in range(n):
|
||||
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
|
||||
|
||||
# Compute the denominator in the differentiation formula.
|
||||
# (and append trailing dims, if necessary)
|
||||
dt = t[k+1:-1] - t[1:-k-1]
|
||||
dt = dt[sh]
|
||||
# Compute the new coefficients
|
||||
c = (c[1:-1-k] - c[:-2-k]) * k / dt
|
||||
# Pad coefficient array to same size as knots (FITPACK
|
||||
# convention)
|
||||
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
|
||||
# Adjust knots
|
||||
t = t[1:-1]
|
||||
k -= 1
|
||||
except FloatingPointError as e:
|
||||
raise ValueError(("The spline has internal repeated knots "
|
||||
"and is not differentiable %d times") % n) from e
|
||||
|
||||
return t, c, k
|
||||
|
||||
|
||||
def splantider(tck, n=1):
|
||||
# see the docstring of `_fitpack_py/splantider`
|
||||
if n < 0:
|
||||
return splder(tck, -n)
|
||||
|
||||
t, c, k = tck
|
||||
|
||||
# Extra axes for the trailing dims of the `c` array:
|
||||
sh = (slice(None),) + (None,)*len(c.shape[1:])
|
||||
|
||||
for j in range(n):
|
||||
# This is the inverse set of operations to splder.
|
||||
|
||||
# Compute the multiplier in the antiderivative formula.
|
||||
dt = t[k+1:] - t[:-k-1]
|
||||
dt = dt[sh]
|
||||
# Compute the new coefficients
|
||||
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
|
||||
c = np.r_[np.zeros((1,) + c.shape[1:]),
|
||||
c,
|
||||
[c[-1]] * (k+2)]
|
||||
# New knots
|
||||
t = np.r_[t[0], t, t[-1]]
|
||||
k += 1
|
||||
|
||||
return t, c, k
|
||||
@ -0,0 +1,854 @@
|
||||
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
|
||||
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
# These are in the API for fitpack even if not used in fitpack.py itself.
|
||||
from ._fitpack_impl import bisplrep, bisplev, dblint # noqa: F401
|
||||
from . import _fitpack_impl as _impl
|
||||
from ._bsplines import BSpline
|
||||
|
||||
|
||||
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
|
||||
full_output=0, nest=None, per=0, quiet=1):
|
||||
"""
|
||||
Find the B-spline representation of an N-D curve.
|
||||
|
||||
Given a list of N rank-1 arrays, `x`, which represent a curve in
|
||||
N-dimensional space parametrized by `u`, find a smooth approximating
|
||||
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
A list of sample vector arrays representing the curve.
|
||||
w : array_like, optional
|
||||
Strictly positive rank-1 array of weights the same length as `x[0]`.
|
||||
The weights are used in computing the weighted least-squares spline
|
||||
fit. If the errors in the `x` values have standard-deviation given by
|
||||
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
|
||||
u : array_like, optional
|
||||
An array of parameter values. If not given, these values are
|
||||
calculated automatically as ``M = len(x[0])``, where
|
||||
|
||||
v[0] = 0
|
||||
|
||||
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
|
||||
|
||||
u[i] = v[i] / v[M-1]
|
||||
|
||||
ub, ue : int, optional
|
||||
The end-points of the parameters interval. Defaults to
|
||||
u[0] and u[-1].
|
||||
k : int, optional
|
||||
Degree of the spline. Cubic splines are recommended.
|
||||
Even values of `k` should be avoided especially with a small s-value.
|
||||
``1 <= k <= 5``, default is 3.
|
||||
task : int, optional
|
||||
If task==0 (default), find t and c for a given smoothing factor, s.
|
||||
If task==1, find t and c for another value of the smoothing factor, s.
|
||||
There must have been a previous call with task=0 or task=1
|
||||
for the same set of data.
|
||||
If task=-1 find the weighted least square spline for a given set of
|
||||
knots, t.
|
||||
s : float, optional
|
||||
A smoothing condition. The amount of smoothness is determined by
|
||||
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
|
||||
where g(x) is the smoothed interpolation of (x,y). The user can
|
||||
use `s` to control the trade-off between closeness and smoothness
|
||||
of fit. Larger `s` means more smoothing while smaller values of `s`
|
||||
indicate less smoothing. Recommended values of `s` depend on the
|
||||
weights, w. If the weights represent the inverse of the
|
||||
standard-deviation of y, then a good `s` value should be found in
|
||||
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
|
||||
data points in x, y, and w.
|
||||
t : array, optional
|
||||
The knots needed for ``task=-1``.
|
||||
There must be at least ``2*k+2`` knots.
|
||||
full_output : int, optional
|
||||
If non-zero, then return optional outputs.
|
||||
nest : int, optional
|
||||
An over-estimate of the total number of knots of the spline to
|
||||
help in determining the storage space. By default nest=m/2.
|
||||
        A value of nest=m+k+1 is always large enough.
|
||||
per : int, optional
|
||||
If non-zero, data points are considered periodic with period
|
||||
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
|
||||
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
|
||||
quiet : int, optional
|
||||
Non-zero to suppress messages.
|
||||
|
||||
Returns
|
||||
-------
|
||||
tck : tuple
|
||||
A tuple, ``(t,c,k)`` containing the vector of knots, the B-spline
|
||||
coefficients, and the degree of the spline.
|
||||
u : array
|
||||
An array of the values of the parameter.
|
||||
fp : float
|
||||
The weighted sum of squared residuals of the spline approximation.
|
||||
ier : int
|
||||
An integer flag about splrep success. Success is indicated
|
||||
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
|
||||
Otherwise an error is raised.
|
||||
msg : str
|
||||
A message corresponding to the integer flag, ier.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splrep, splev, sproot, spalde, splint,
|
||||
bisplrep, bisplev
|
||||
UnivariateSpline, BivariateSpline
|
||||
BSpline
|
||||
make_interp_spline
|
||||
|
||||
Notes
|
||||
-----
|
||||
See `splev` for evaluation of the spline and its derivatives.
|
||||
The number of dimensions N must be smaller than 11.
|
||||
|
||||
The number of coefficients in the `c` array is ``k+1`` less than the number
|
||||
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
|
||||
the array of coefficients to have the same length as the array of knots.
|
||||
These additional coefficients are ignored by evaluation routines, `splev`
|
||||
and `BSpline`.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
|
||||
parametric splines, Computer Graphics and Image Processing",
|
||||
20 (1982) 171-184.
|
||||
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
|
||||
parametric splines", report tw55, Dept. Computer Science,
|
||||
K.U.Leuven, 1981.
|
||||
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
|
||||
Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
    Generate a discretization of a limacon curve in polar coordinates:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> phi = np.linspace(0, 2.*np.pi, 40)
|
||||
>>> r = 0.5 + np.cos(phi) # polar coords
|
||||
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
|
||||
|
||||
And interpolate:
|
||||
|
||||
>>> from scipy.interpolate import splprep, splev
|
||||
>>> tck, u = splprep([x, y], s=0)
|
||||
>>> new_points = splev(u, tck)
|
||||
|
||||
Notice that (i) we force interpolation by using `s=0`,
|
||||
(ii) the parameterization, ``u``, is generated automatically.
|
||||
Now plot the result:
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> fig, ax = plt.subplots()
|
||||
>>> ax.plot(x, y, 'ro')
|
||||
>>> ax.plot(new_points[0], new_points[1], 'r-')
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
|
||||
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
|
||||
quiet)
|
||||
return res
|
||||
|
||||
|
||||
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
|
||||
full_output=0, per=0, quiet=1):
|
||||
"""
|
||||
Find the B-spline representation of a 1-D curve.
|
||||
|
||||
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
|
||||
approximation of degree k on the interval ``xb <= x <= xe``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x, y : array_like
|
||||
The data points defining a curve ``y = f(x)``.
|
||||
w : array_like, optional
|
||||
Strictly positive rank-1 array of weights the same length as `x` and `y`.
|
||||
The weights are used in computing the weighted least-squares spline
|
||||
fit. If the errors in the `y` values have standard-deviation given by the
|
||||
vector ``d``, then `w` should be ``1/d``. Default is ``ones(len(x))``.
|
||||
xb, xe : float, optional
|
||||
The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``
|
||||
respectively.
|
||||
k : int, optional
|
||||
The degree of the spline fit. It is recommended to use cubic splines.
|
||||
Even values of `k` should be avoided especially with small `s` values.
|
||||
``1 <= k <= 5``.
|
||||
task : {1, 0, -1}, optional
|
||||
If ``task==0``, find ``t`` and ``c`` for a given smoothing factor, `s`.
|
||||
|
||||
If ``task==1`` find ``t`` and ``c`` for another value of the smoothing factor,
|
||||
`s`. There must have been a previous call with ``task=0`` or ``task=1`` for
|
||||
        the same set of data (``t`` will be stored and used internally)
|
||||
|
||||
If ``task=-1`` find the weighted least square spline for a given set of
|
||||
knots, ``t``. These should be interior knots as knots on the ends will be
|
||||
added automatically.
|
||||
s : float, optional
|
||||
A smoothing condition. The amount of smoothness is determined by
|
||||
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s`` where ``g(x)``
|
||||
is the smoothed interpolation of ``(x,y)``. The user can use `s` to control
|
||||
the tradeoff between closeness and smoothness of fit. Larger `s` means
|
||||
more smoothing while smaller values of `s` indicate less smoothing.
|
||||
Recommended values of `s` depend on the weights, `w`. If the weights
|
||||
represent the inverse of the standard-deviation of `y`, then a good `s`
|
||||
value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where ``m`` is
|
||||
the number of datapoints in `x`, `y`, and `w`. default : ``s=m-sqrt(2*m)`` if
|
||||
weights are supplied. ``s = 0.0`` (interpolating) if no weights are
|
||||
supplied.
|
||||
t : array_like, optional
|
||||
The knots needed for ``task=-1``. If given then task is automatically set
|
||||
to ``-1``.
|
||||
full_output : bool, optional
|
||||
If non-zero, then return optional outputs.
|
||||
per : bool, optional
|
||||
If non-zero, data points are considered periodic with period ``x[m-1]`` -
|
||||
``x[0]`` and a smooth periodic spline approximation is returned. Values of
|
||||
``y[m-1]`` and ``w[m-1]`` are not used.
|
||||
The default is zero, corresponding to boundary condition 'not-a-knot'.
|
||||
quiet : bool, optional
|
||||
Non-zero to suppress messages.
|
||||
|
||||
Returns
|
||||
-------
|
||||
tck : tuple
|
||||
A tuple ``(t,c,k)`` containing the vector of knots, the B-spline
|
||||
coefficients, and the degree of the spline.
|
||||
fp : array, optional
|
||||
The weighted sum of squared residuals of the spline approximation.
|
||||
ier : int, optional
|
||||
An integer flag about splrep success. Success is indicated if ``ier<=0``.
|
||||
If ``ier in [1,2,3]``, an error occurred but was not raised. Otherwise an
|
||||
error is raised.
|
||||
msg : str, optional
|
||||
A message corresponding to the integer flag, `ier`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
UnivariateSpline, BivariateSpline
|
||||
splprep, splev, sproot, spalde, splint
|
||||
bisplrep, bisplev
|
||||
BSpline
|
||||
make_interp_spline
|
||||
|
||||
Notes
|
||||
-----
|
||||
See `splev` for evaluation of the spline and its derivatives. Uses the
|
||||
FORTRAN routine ``curfit`` from FITPACK.
|
||||
|
||||
The user is responsible for assuring that the values of `x` are unique.
|
||||
Otherwise, `splrep` will not return sensible results.
|
||||
|
||||
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
|
||||
i.e., there must be a subset of data points ``x[j]`` such that
|
||||
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
|
||||
|
||||
This routine zero-pads the coefficients array ``c`` to have the same length
|
||||
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
|
||||
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
|
||||
`splprep`, which does not zero-pad the coefficients.
|
||||
|
||||
The default boundary condition is 'not-a-knot', i.e. the first and second
|
||||
segment at a curve end are the same polynomial. More boundary conditions are
|
||||
available in `CubicSpline`.
|
||||
|
||||
References
|
||||
----------
|
||||
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
|
||||
|
||||
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
|
||||
integration of experimental data using spline functions",
|
||||
J.Comp.Appl.Maths 1 (1975) 165-184.
|
||||
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
|
||||
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
|
||||
1286-1304.
|
||||
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
|
||||
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
|
||||
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
|
||||
Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
You can interpolate 1-D points with a B-spline curve.
|
||||
    Further examples are given
|
||||
:ref:`in the tutorial <tutorial-interpolate_splXXX>`.
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import splev, splrep
|
||||
>>> x = np.linspace(0, 10, 10)
|
||||
>>> y = np.sin(x)
|
||||
>>> spl = splrep(x, y)
|
||||
>>> x2 = np.linspace(0, 10, 200)
|
||||
>>> y2 = splev(x2, spl)
|
||||
>>> plt.plot(x, y, 'o', x2, y2)
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
|
||||
return res
|
||||
|
||||
|
||||
def splev(x, tck, der=0, ext=0):
|
||||
"""
|
||||
Evaluate a B-spline or its derivatives.
|
||||
|
||||
Given the knots and coefficients of a B-spline representation, evaluate
|
||||
the value of the smoothing polynomial and its derivatives. This is a
|
||||
wrapper around the FORTRAN routines splev and splder of FITPACK.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
An array of points at which to return the value of the smoothed
|
||||
spline or its derivatives. If `tck` was returned from `splprep`,
|
||||
then the parameter values, u should be given.
|
||||
tck : BSpline instance or tuple
|
||||
If a tuple, then it should be a sequence of length 3 returned by
|
||||
`splrep` or `splprep` containing the knots, coefficients, and degree
|
||||
of the spline. (Also see Notes.)
|
||||
der : int, optional
|
||||
The order of derivative of the spline to compute (must be less than
|
||||
or equal to k, the degree of the spline).
|
||||
ext : int, optional
|
||||
Controls the value returned for elements of ``x`` not in the
|
||||
interval defined by the knot sequence.
|
||||
|
||||
* if ext=0, return the extrapolated value.
|
||||
* if ext=1, return 0
|
||||
* if ext=2, raise a ValueError
|
||||
* if ext=3, return the boundary value.
|
||||
|
||||
The default value is 0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray or list of ndarrays
|
||||
An array of values representing the spline function evaluated at
|
||||
the points in `x`. If `tck` was returned from `splprep`, then this
|
||||
is a list of arrays representing the curve in an N-D space.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splprep, splrep, sproot, spalde, splint
|
||||
bisplrep, bisplev
|
||||
BSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
Manipulating the tck-tuples directly is not recommended. In new code,
|
||||
prefer using `BSpline` objects.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
|
||||
Theory, 6, p.50-62, 1972.
|
||||
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
|
||||
Applics, 10, p.134-149, 1972.
|
||||
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
|
||||
on Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
|
||||
|
||||
A comparison between `splev`, `splder` and `spalde` to compute the derivatives of a
|
||||
B-spline can be found in the `spalde` examples section.
|
||||
|
||||
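    A minimal sketch (synthetic data): evaluate a fitted spline and its first
    derivative on a finer grid.

    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splev
    >>> x = np.linspace(0, 2*np.pi, 25)
    >>> tck = splrep(x, np.sin(x))
    >>> xnew = np.linspace(0, 2*np.pi, 100)
    >>> ynew = splev(xnew, tck)           # spline values, close to sin(xnew)
    >>> dynew = splev(xnew, tck, der=1)   # first derivative, close to cos(xnew)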
"""
|
||||
if isinstance(tck, BSpline):
|
||||
if tck.c.ndim > 1:
|
||||
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
|
||||
"not allowed. Use BSpline.__call__(x) instead.")
|
||||
raise ValueError(mesg)
|
||||
|
||||
# remap the out-of-bounds behavior
|
||||
try:
|
||||
extrapolate = {0: True, }[ext]
|
||||
except KeyError as e:
|
||||
raise ValueError("Extrapolation mode %s is not supported "
|
||||
"by BSpline." % ext) from e
|
||||
|
||||
return tck(x, der, extrapolate=extrapolate)
|
||||
else:
|
||||
return _impl.splev(x, tck, der, ext)
|
||||
|
||||
|
||||
def splint(a, b, tck, full_output=0):
|
||||
"""
|
||||
Evaluate the definite integral of a B-spline between two given points.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a, b : float
|
||||
The end-points of the integration interval.
|
||||
tck : tuple or a BSpline instance
|
||||
If a tuple, then it should be a sequence of length 3, containing the
|
||||
vector of knots, the B-spline coefficients, and the degree of the
|
||||
spline (see `splev`).
|
||||
full_output : int, optional
|
||||
Non-zero to return optional output.
|
||||
|
||||
Returns
|
||||
-------
|
||||
integral : float
|
||||
The resulting integral.
|
||||
wrk : ndarray
|
||||
An array containing the integrals of the normalized B-splines
|
||||
defined on the set of knots.
|
||||
(Only returned if `full_output` is non-zero)
|
||||
|
||||
See Also
|
||||
--------
|
||||
splprep, splrep, sproot, spalde, splev
|
||||
bisplrep, bisplev
|
||||
BSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
`splint` silently assumes that the spline function is zero outside the data
|
||||
interval (`a`, `b`).
|
||||
|
||||
Manipulating the tck-tuples directly is not recommended. In new code,
|
||||
prefer using the `BSpline` objects.
|
||||
|
||||
References
|
||||
----------
|
||||
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
|
||||
J. Inst. Maths Applics, 17, p.37-41, 1976.
|
||||
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
|
||||
on Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
|
||||
|
||||
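    A minimal sketch (synthetic data): the spline is fitted to ``y = 2*x``, so
    its integral over ``[0, 1]`` should be close to 1.

    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splint
    >>> x = np.linspace(0, 1, 20)
    >>> tck = splrep(x, 2*x)
    >>> result = splint(0, 1, tck)   # approximately 1.0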
"""
|
||||
if isinstance(tck, BSpline):
|
||||
if tck.c.ndim > 1:
|
||||
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
|
||||
"not allowed. Use BSpline.integrate() instead.")
|
||||
raise ValueError(mesg)
|
||||
|
||||
if full_output != 0:
|
||||
mesg = ("full_output = %s is not supported. Proceeding as if "
|
||||
"full_output = 0" % full_output)
|
||||
|
||||
return tck.integrate(a, b, extrapolate=False)
|
||||
else:
|
||||
return _impl.splint(a, b, tck, full_output)
|
||||
|
||||
|
||||
def sproot(tck, mest=10):
|
||||
"""
|
||||
Find the roots of a cubic B-spline.
|
||||
|
||||
Given the knots (>=8) and coefficients of a cubic B-spline return the
|
||||
roots of the spline.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tck : tuple or a BSpline object
|
||||
If a tuple, then it should be a sequence of length 3, containing the
|
||||
vector of knots, the B-spline coefficients, and the degree of the
|
||||
spline.
|
||||
The number of knots must be >= 8, and the degree must be 3.
|
||||
        The knots must be a monotonically increasing sequence.
|
||||
mest : int, optional
|
||||
An estimate of the number of zeros (Default is 10).
|
||||
|
||||
Returns
|
||||
-------
|
||||
zeros : ndarray
|
||||
An array giving the roots of the spline.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splprep, splrep, splint, spalde, splev
|
||||
bisplrep, bisplev
|
||||
BSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
Manipulating the tck-tuples directly is not recommended. In new code,
|
||||
prefer using the `BSpline` objects.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
|
||||
Theory, 6, p.50-62, 1972.
|
||||
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
|
||||
Applics, 10, p.134-149, 1972.
|
||||
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
|
||||
on Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
For some data, this method may miss a root. This happens when one of
|
||||
the spline knots (which FITPACK places automatically) happens to
|
||||
coincide with the true root. A workaround is to convert to `PPoly`,
|
||||
which uses a different root-finding algorithm.
|
||||
|
||||
For example,
|
||||
|
||||
>>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
|
||||
>>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
|
||||
... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
|
||||
... 6.520430e-03, 8.170770e-03]
|
||||
>>> from scipy.interpolate import splrep, sproot, PPoly
|
||||
>>> tck = splrep(x, y, s=0)
|
||||
>>> sproot(tck)
|
||||
array([], dtype=float64)
|
||||
|
||||
    Converting to a PPoly object does find the root at `x=2`:
|
||||
|
||||
>>> ppoly = PPoly.from_spline(tck)
|
||||
>>> ppoly.roots(extrapolate=False)
|
||||
array([2.])
|
||||
|
||||
|
||||
Further examples are given :ref:`in the tutorial
|
||||
<tutorial-interpolate_splXXX>`.
|
||||
|
||||
"""
|
||||
if isinstance(tck, BSpline):
|
||||
if tck.c.ndim > 1:
|
||||
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
|
||||
"not allowed.")
|
||||
raise ValueError(mesg)
|
||||
|
||||
t, c, k = tck.tck
|
||||
|
||||
# _impl.sproot expects the interpolation axis to be last, so roll it.
|
||||
# NB: This transpose is a no-op if c is 1D.
|
||||
sh = tuple(range(c.ndim))
|
||||
c = c.transpose(sh[1:] + (0,))
|
||||
return _impl.sproot((t, c, k), mest)
|
||||
else:
|
||||
return _impl.sproot(tck, mest)
|
||||
|
||||
|
||||
def spalde(x, tck):
|
||||
"""
|
||||
Evaluate a B-spline and all its derivatives at one point (or set of points) up
|
||||
    to order k (the degree of the spline); order 0 is the spline itself.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
A point or a set of points at which to evaluate the derivatives.
|
||||
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
|
||||
tck : tuple
|
||||
A tuple (t,c,k) containing the vector of knots,
|
||||
the B-spline coefficients, and the degree of the spline whose
|
||||
derivatives to compute.
|
||||
|
||||
Returns
|
||||
-------
|
||||
results : {ndarray, list of ndarrays}
|
||||
An array (or a list of arrays) containing all derivatives
|
||||
        up to order k inclusive for each point `x`; the first element is the
|
||||
spline itself.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
|
||||
UnivariateSpline, BivariateSpline
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
|
||||
6 (1972) 50-62.
|
||||
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
|
||||
applics 10 (1972) 134-149.
|
||||
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
|
||||
Numerical Analysis, Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
    To calculate the derivatives of a B-spline there are several approaches.
|
||||
In this example, we will demonstrate that `spalde` is equivalent to
|
||||
calling `splev` and `splder`.
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import BSpline, spalde, splder, splev
|
||||
|
||||
>>> # Store characteristic parameters of a B-spline
|
||||
>>> tck = ((-2, -2, -2, -2, -1, 0, 1, 2, 2, 2, 2), # knots
|
||||
... (0, 0, 0, 6, 0, 0, 0), # coefficients
|
||||
... 3) # degree (cubic)
|
||||
>>> # Instance a B-spline object
|
||||
    >>> # `BSpline` objects are preferred, except for spalde()
|
||||
>>> bspl = BSpline(tck[0], tck[1], tck[2])
|
||||
>>> # Generate extra points to get a smooth curve
|
||||
>>> x = np.linspace(min(tck[0]), max(tck[0]), 100)
|
||||
|
||||
Evaluate the curve and all derivatives
|
||||
|
||||
>>> # The order of derivative must be less or equal to k, the degree of the spline
|
||||
>>> # Method 1: spalde()
|
||||
>>> f1_y_bsplin = [spalde(i, tck)[0] for i in x ] # The B-spline itself
|
||||
>>> f1_y_deriv1 = [spalde(i, tck)[1] for i in x ] # 1st derivative
|
||||
>>> f1_y_deriv2 = [spalde(i, tck)[2] for i in x ] # 2nd derivative
|
||||
>>> f1_y_deriv3 = [spalde(i, tck)[3] for i in x ] # 3rd derivative
|
||||
    >>> # You can reach the same result by using `splev` and `splder`
|
||||
>>> f2_y_deriv3 = splev(x, bspl, der=3)
|
||||
>>> f3_y_deriv3 = splder(bspl, n=3)(x)
|
||||
|
||||
>>> # Generate a figure with three axes for graphic comparison
|
||||
>>> fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 5))
|
||||
    >>> suptitle = fig.suptitle('Evaluate a B-spline and all derivatives')
|
||||
>>> # Plot B-spline and all derivatives using the three methods
|
||||
>>> orders = range(4)
|
||||
>>> linetypes = ['-', '--', '-.', ':']
|
||||
>>> labels = ['B-Spline', '1st deriv.', '2nd deriv.', '3rd deriv.']
|
||||
>>> functions = ['splev()', 'splder()', 'spalde()']
|
||||
>>> for order, linetype, label in zip(orders, linetypes, labels):
|
||||
... ax1.plot(x, splev(x, bspl, der=order), linetype, label=label)
|
||||
... ax2.plot(x, splder(bspl, n=order)(x), linetype, label=label)
|
||||
... ax3.plot(x, [spalde(i, tck)[order] for i in x], linetype, label=label)
|
||||
>>> for ax, function in zip((ax1, ax2, ax3), functions):
|
||||
... ax.set_title(function)
|
||||
... ax.legend()
|
||||
>>> plt.tight_layout()
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
if isinstance(tck, BSpline):
|
||||
raise TypeError("spalde does not accept BSpline instances.")
|
||||
else:
|
||||
return _impl.spalde(x, tck)
|
||||
|
||||
|
||||
def insert(x, tck, m=1, per=0):
|
||||
"""
|
||||
Insert knots into a B-spline.
|
||||
|
||||
Given the knots and coefficients of a B-spline representation, create a
|
||||
new B-spline with a knot inserted `m` times at point `x`.
|
||||
This is a wrapper around the FORTRAN routine insert of FITPACK.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x (u) : float
|
||||
A knot value at which to insert a new knot. If `tck` was returned
|
||||
from ``splprep``, then the parameter values, u should be given.
|
||||
tck : a `BSpline` instance or a tuple
|
||||
If tuple, then it is expected to be a tuple (t,c,k) containing
|
||||
the vector of knots, the B-spline coefficients, and the degree of
|
||||
the spline.
|
||||
m : int, optional
|
||||
The number of times to insert the given knot (its multiplicity).
|
||||
Default is 1.
|
||||
per : int, optional
|
||||
If non-zero, the input spline is considered periodic.
|
||||
|
||||
Returns
|
||||
-------
|
||||
BSpline instance or a tuple
|
||||
A new B-spline with knots t, coefficients c, and degree k.
|
||||
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
|
||||
In case of a periodic spline (``per != 0``) there must be
|
||||
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
|
||||
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
|
||||
A tuple is returned iff the input argument `tck` is a tuple, otherwise
|
||||
a BSpline object is constructed and returned.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Based on algorithms from [1]_ and [2]_.
|
||||
|
||||
Manipulating the tck-tuples directly is not recommended. In new code,
|
||||
prefer using the `BSpline` objects, in particular `BSpline.insert_knot`
|
||||
method.
|
||||
|
||||
See Also
|
||||
--------
|
||||
BSpline.insert_knot
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
|
||||
Computer Aided Design, 12, p.199-201, 1980.
|
||||
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
|
||||
Numerical Analysis", Oxford University Press, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
You can insert knots into a B-spline.
|
||||
|
||||
>>> from scipy.interpolate import splrep, insert
|
||||
>>> import numpy as np
|
||||
>>> x = np.linspace(0, 10, 5)
|
||||
>>> y = np.sin(x)
|
||||
>>> tck = splrep(x, y)
|
||||
>>> tck[0]
|
||||
array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
|
||||
|
||||
A knot is inserted:
|
||||
|
||||
>>> tck_inserted = insert(3, tck)
|
||||
>>> tck_inserted[0]
|
||||
array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
|
||||
|
||||
Some knots are inserted:
|
||||
|
||||
>>> tck_inserted2 = insert(8, tck, m=3)
|
||||
>>> tck_inserted2[0]
|
||||
array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
|
||||
|
||||
"""
|
||||
if isinstance(tck, BSpline):
|
||||
|
||||
t, c, k = tck.tck
|
||||
|
||||
# FITPACK expects the interpolation axis to be last, so roll it over
|
||||
# NB: if c array is 1D, transposes are no-ops
|
||||
sh = tuple(range(c.ndim))
|
||||
c = c.transpose(sh[1:] + (0,))
|
||||
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
|
||||
|
||||
# and roll the last axis back
|
||||
c_ = np.asarray(c_)
|
||||
c_ = c_.transpose((sh[-1],) + sh[:-1])
|
||||
return BSpline(t_, c_, k_)
|
||||
else:
|
||||
return _impl.insert(x, tck, m, per)
|
||||
|
||||
|
||||
def splder(tck, n=1):
|
||||
"""
|
||||
Compute the spline representation of the derivative of a given spline
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tck : BSpline instance or tuple
|
||||
BSpline instance or a tuple (t,c,k) containing the vector of knots,
|
||||
the B-spline coefficients, and the degree of the spline whose
|
||||
derivative to compute
|
||||
n : int, optional
|
||||
Order of derivative to evaluate. Default: 1
|
||||
|
||||
Returns
|
||||
-------
|
||||
`BSpline` instance or tuple
|
||||
Spline of order k2=k-n representing the derivative
|
||||
of the input spline.
|
||||
A tuple is returned if the input argument `tck` is a tuple, otherwise
|
||||
a BSpline object is constructed and returned.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splantider, splev, spalde
|
||||
BSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 0.13.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
This can be used for finding maxima of a curve:
|
||||
|
||||
>>> from scipy.interpolate import splrep, splder, sproot
|
||||
>>> import numpy as np
|
||||
>>> x = np.linspace(0, 10, 70)
|
||||
>>> y = np.sin(x)
|
||||
>>> spl = splrep(x, y, k=4)
|
||||
|
||||
Now, differentiate the spline and find the zeros of the
|
||||
derivative. (NB: `sproot` only works for order 3 splines, so we
|
||||
fit an order 4 spline):
|
||||
|
||||
>>> dspl = splder(spl)
|
||||
>>> sproot(dspl) / np.pi
|
||||
array([ 0.50000001, 1.5 , 2.49999998])
|
||||
|
||||
This agrees well with roots :math:`\\pi/2 + n\\pi` of
|
||||
:math:`\\cos(x) = \\sin'(x)`.
|
||||
|
||||
A comparison between `splev`, `splder` and `spalde` to compute the derivatives of a
|
||||
B-spline can be found in the `spalde` examples section.
|
||||
|
||||
"""
|
||||
if isinstance(tck, BSpline):
|
||||
return tck.derivative(n)
|
||||
else:
|
||||
return _impl.splder(tck, n)
|
||||
|
||||
|
||||
def splantider(tck, n=1):
|
||||
"""
|
||||
Compute the spline for the antiderivative (integral) of a given spline.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tck : BSpline instance or a tuple of (t, c, k)
|
||||
Spline whose antiderivative to compute
|
||||
n : int, optional
|
||||
Order of antiderivative to evaluate. Default: 1
|
||||
|
||||
Returns
|
||||
-------
|
||||
BSpline instance or a tuple of (t2, c2, k2)
|
||||
Spline of order k2=k+n representing the antiderivative of the input
|
||||
spline.
|
||||
A tuple is returned iff the input argument `tck` is a tuple, otherwise
|
||||
a BSpline object is constructed and returned.
|
||||
|
||||
See Also
|
||||
--------
|
||||
splder, splev, spalde
|
||||
BSpline
|
||||
|
||||
Notes
|
||||
-----
|
||||
The `splder` function is the inverse operation of this function.
|
||||
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
|
||||
rounding error.
|
||||
|
||||
.. versionadded:: 0.13.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.interpolate import splrep, splder, splantider, splev
|
||||
>>> import numpy as np
|
||||
>>> x = np.linspace(0, np.pi/2, 70)
|
||||
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
|
||||
>>> spl = splrep(x, y)
|
||||
|
||||
The derivative is the inverse operation of the antiderivative,
|
||||
although some floating point error accumulates:
|
||||
|
||||
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
|
||||
(array(2.1565429877197317), array(2.1565429877201865))
|
||||
|
||||
Antiderivative can be used to evaluate definite integrals:
|
||||
|
||||
>>> ispl = splantider(spl)
|
||||
>>> splev(np.pi/2, ispl) - splev(0, ispl)
|
||||
2.2572053588768486
|
||||
|
||||
This is indeed an approximation to the complete elliptic integral
|
||||
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
|
||||
|
||||
>>> from scipy.special import ellipk
|
||||
>>> ellipk(0.8)
|
||||
2.2572053268208538
|
||||
|
||||
"""
|
||||
if isinstance(tck, BSpline):
|
||||
return tck.antiderivative(n)
|
||||
else:
|
||||
return _impl.splantider(tck, n)
|
||||
|
||||
2242
venv/lib/python3.12/site-packages/scipy/interpolate/_interpolate.py
Normal file
File diff suppressed because it is too large
@ -0,0 +1,358 @@
|
||||
import itertools
|
||||
import functools
|
||||
import operator
|
||||
import numpy as np
|
||||
|
||||
from math import prod
|
||||
|
||||
from . import _bspl # type: ignore
|
||||
|
||||
import scipy.sparse.linalg as ssl
|
||||
from scipy.sparse import csr_array
|
||||
|
||||
from ._bsplines import _not_a_knot
|
||||
|
||||
__all__ = ["NdBSpline"]
|
||||
|
||||
|
||||
def _get_dtype(dtype):
|
||||
"""Return np.complex128 for complex dtypes, np.float64 otherwise."""
|
||||
if np.issubdtype(dtype, np.complexfloating):
|
||||
return np.complex128
|
||||
else:
|
||||
return np.float64
|
||||
|
||||
|
||||
class NdBSpline:
|
||||
"""Tensor product spline object.
|
||||
|
||||
The value at point ``xp = (x1, x2, ..., xN)`` is evaluated as a linear
|
||||
combination of products of one-dimensional b-splines in each of the ``N``
|
||||
dimensions::
|
||||
|
||||
c[i1, i2, ..., iN] * B(x1; i1, t1) * B(x2; i2, t2) * ... * B(xN; iN, tN)
|
||||
|
||||
|
||||
Here ``B(x; i, t)`` is the ``i``-th b-spline defined by the knot vector
|
||||
``t`` evaluated at ``x``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : tuple of 1D ndarrays
|
||||
knot vectors in directions 1, 2, ... N,
|
||||
``len(t[i]) == n[i] + k + 1``
|
||||
c : ndarray, shape (n1, n2, ..., nN, ...)
|
||||
b-spline coefficients
|
||||
k : int or length-d tuple of integers
|
||||
spline degrees.
|
||||
A single integer is interpreted as having this degree for
|
||||
all dimensions.
|
||||
extrapolate : bool, optional
|
||||
Whether to extrapolate out-of-bounds inputs, or return `nan`.
|
||||
Default is to extrapolate.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
t : tuple of ndarrays
|
||||
        Knot vectors.
|
||||
c : ndarray
|
||||
        Coefficients of the tensor-product spline.
|
||||
k : tuple of integers
|
||||
Degrees for each dimension.
|
||||
extrapolate : bool, optional
|
||||
Whether to extrapolate or return nans for out-of-bounds inputs.
|
||||
        Defaults to True.
|
||||
|
||||
Methods
|
||||
-------
|
||||
__call__
|
||||
design_matrix
|
||||
|
||||
See Also
|
||||
--------
|
||||
BSpline : a one-dimensional B-spline object
|
||||
NdPPoly : an N-dimensional piecewise tensor product polynomial
|
||||
|
||||
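    Examples
    --------
    A minimal one-dimensional sketch (knots and coefficients picked by hand,
    purely for illustration):

    >>> import numpy as np
    >>> from scipy.interpolate import NdBSpline
    >>> k = 3
    >>> t = (np.array([0., 0., 0., 0., 1., 2., 3., 3., 3., 3.]),)  # 1 knot vector
    >>> c = np.arange(6, dtype=float)        # len(t[0]) - k - 1 == 6 coefficients
    >>> spl = NdBSpline(t, c, k)
    >>> xi = np.array([[0.5], [1.5], [2.5]]) # shape (npoints, ndim) with ndim == 1
    >>> values = spl(xi)                     # shape (3,)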
"""
|
||||
def __init__(self, t, c, k, *, extrapolate=None):
|
||||
ndim = len(t)
|
||||
|
||||
try:
|
||||
len(k)
|
||||
except TypeError:
|
||||
# make k a tuple
|
||||
k = (k,)*ndim
|
||||
|
||||
if len(k) != ndim:
|
||||
raise ValueError(f"{len(t) = } != {len(k) = }.")
|
||||
|
||||
self.k = tuple(operator.index(ki) for ki in k)
|
||||
self.t = tuple(np.ascontiguousarray(ti, dtype=float) for ti in t)
|
||||
self.c = np.asarray(c)
|
||||
|
||||
if extrapolate is None:
|
||||
extrapolate = True
|
||||
self.extrapolate = bool(extrapolate)
|
||||
|
||||
self.c = np.asarray(c)
|
||||
|
||||
for d in range(ndim):
|
||||
td = self.t[d]
|
||||
kd = self.k[d]
|
||||
n = td.shape[0] - kd - 1
|
||||
if kd < 0:
|
||||
raise ValueError(f"Spline degree in dimension {d} cannot be"
|
||||
f" negative.")
|
||||
if td.ndim != 1:
|
||||
raise ValueError(f"Knot vector in dimension {d} must be"
|
||||
f" one-dimensional.")
|
||||
if n < kd + 1:
|
||||
raise ValueError(f"Need at least {2*kd + 2} knots for degree"
|
||||
f" {kd} in dimension {d}.")
|
||||
if (np.diff(td) < 0).any():
|
||||
raise ValueError(f"Knots in dimension {d} must be in a"
|
||||
f" non-decreasing order.")
|
||||
if len(np.unique(td[kd:n + 1])) < 2:
|
||||
raise ValueError(f"Need at least two internal knots in"
|
||||
f" dimension {d}.")
|
||||
if not np.isfinite(td).all():
|
||||
raise ValueError(f"Knots in dimension {d} should not have"
|
||||
f" nans or infs.")
|
||||
if self.c.ndim < ndim:
|
||||
raise ValueError(f"Coefficients must be at least"
|
||||
f" {d}-dimensional.")
|
||||
if self.c.shape[d] != n:
|
||||
raise ValueError(f"Knots, coefficients and degree in dimension"
|
||||
f" {d} are inconsistent:"
|
||||
f" got {self.c.shape[d]} coefficients for"
|
||||
f" {len(td)} knots, need at least {n} for"
|
||||
f" k={k}.")
|
||||
|
||||
dt = _get_dtype(self.c.dtype)
|
||||
self.c = np.ascontiguousarray(self.c, dtype=dt)
|
||||
|
||||
def __call__(self, xi, *, nu=None, extrapolate=None):
|
||||
"""Evaluate the tensor product b-spline at ``xi``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : array_like, shape(..., ndim)
|
||||
The coordinates to evaluate the interpolator at.
|
||||
This can be a list or tuple of ndim-dimensional points
|
||||
or an array with the shape (num_points, ndim).
|
||||
nu : array_like, optional, shape (ndim,)
|
||||
Orders of derivatives to evaluate. Each must be non-negative.
|
||||
            Defaults to the zeroth derivative.
|
||||
extrapolate : bool, optional
|
||||
            Whether to extrapolate based on first and last intervals in each
|
||||
            dimension, or return `nan`. Defaults to ``self.extrapolate``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
values : ndarray, shape ``xi.shape[:-1] + self.c.shape[ndim:]``
|
||||
Interpolated values at ``xi``
|
||||
"""
|
||||
ndim = len(self.t)
|
||||
|
||||
if extrapolate is None:
|
||||
extrapolate = self.extrapolate
|
||||
extrapolate = bool(extrapolate)
|
||||
|
||||
if nu is None:
|
||||
nu = np.zeros((ndim,), dtype=np.intc)
|
||||
else:
|
||||
nu = np.asarray(nu, dtype=np.intc)
|
||||
if nu.ndim != 1 or nu.shape[0] != ndim:
|
||||
raise ValueError(
|
||||
f"invalid number of derivative orders {nu = } for "
|
||||
f"ndim = {len(self.t)}.")
|
||||
if any(nu < 0):
|
||||
raise ValueError(f"derivatives must be positive, got {nu = }")
|
||||
|
||||
# prepare xi : shape (..., m1, ..., md) -> (1, m1, ..., md)
|
||||
xi = np.asarray(xi, dtype=float)
|
||||
xi_shape = xi.shape
|
||||
xi = xi.reshape(-1, xi_shape[-1])
|
||||
xi = np.ascontiguousarray(xi)
|
||||
|
||||
if xi_shape[-1] != ndim:
|
||||
raise ValueError(f"Shapes: xi.shape={xi_shape} and ndim={ndim}")
|
||||
|
||||
# prepare k & t
|
||||
_k = np.asarray(self.k, dtype=np.dtype("long"))
|
||||
|
||||
# pack the knots into a single array
|
||||
len_t = [len(ti) for ti in self.t]
|
||||
_t = np.empty((ndim, max(len_t)), dtype=float)
|
||||
_t.fill(np.nan)
|
||||
for d in range(ndim):
|
||||
_t[d, :len(self.t[d])] = self.t[d]
|
||||
len_t = np.asarray(len_t, dtype=np.dtype("long"))
|
||||
|
||||
# tabulate the flat indices for iterating over the (k+1)**ndim subarray
|
||||
shape = tuple(kd + 1 for kd in self.k)
|
||||
indices = np.unravel_index(np.arange(prod(shape)), shape)
|
||||
_indices_k1d = np.asarray(indices, dtype=np.intp).T
|
||||
|
||||
# prepare the coefficients: flatten the trailing dimensions
|
||||
c1 = self.c.reshape(self.c.shape[:ndim] + (-1,))
|
||||
c1r = c1.ravel()
|
||||
|
||||
# replacement for np.ravel_multi_index for indexing of `c1`:
|
||||
_strides_c1 = np.asarray([s // c1.dtype.itemsize
|
||||
for s in c1.strides], dtype=np.intp)
|
||||
|
||||
num_c_tr = c1.shape[-1] # # of trailing coefficients
|
||||
out = np.empty(xi.shape[:-1] + (num_c_tr,), dtype=c1.dtype)
|
||||
|
||||
_bspl.evaluate_ndbspline(xi,
|
||||
_t,
|
||||
len_t,
|
||||
_k,
|
||||
nu,
|
||||
extrapolate,
|
||||
c1r,
|
||||
num_c_tr,
|
||||
_strides_c1,
|
||||
_indices_k1d,
|
||||
out,)
|
||||
|
||||
return out.reshape(xi_shape[:-1] + self.c.shape[ndim:])
|
||||
|
||||
@classmethod
|
||||
def design_matrix(cls, xvals, t, k, extrapolate=True):
|
||||
"""Construct the design matrix as a CSR format sparse array.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xvals : ndarray, shape(npts, ndim)
|
||||
Data points. ``xvals[j, :]`` gives the ``j``-th data point as an
|
||||
``ndim``-dimensional array.
|
||||
t : tuple of 1D ndarrays, length-ndim
|
||||
Knot vectors in directions 1, 2, ... ndim,
|
||||
k : int
|
||||
B-spline degree.
|
||||
extrapolate : bool, optional
|
||||
            Whether to extrapolate out-of-bounds values or raise a `ValueError`
|
||||
|
||||
Returns
|
||||
-------
|
||||
design_matrix : a CSR array
|
||||
Each row of the design matrix corresponds to a value in `xvals` and
|
||||
contains values of b-spline basis elements which are non-zero
|
||||
at this value.
|
||||
|
||||
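        Examples
        --------
        A minimal one-dimensional sketch (hand-picked knots, for illustration
        only):

        >>> import numpy as np
        >>> from scipy.interpolate import NdBSpline
        >>> t = (np.array([0., 0., 0., 0., 1., 2., 3., 3., 3., 3.]),)
        >>> xvals = np.array([[0.5], [1.5], [2.5]])
        >>> dm = NdBSpline.design_matrix(xvals, t, k=3)   # sparse CSR array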
"""
|
||||
xvals = np.asarray(xvals, dtype=float)
|
||||
ndim = xvals.shape[-1]
|
||||
if len(t) != ndim:
|
||||
raise ValueError(
|
||||
f"Data and knots are inconsistent: len(t) = {len(t)} for "
|
||||
f" {ndim = }."
|
||||
)
|
||||
try:
|
||||
len(k)
|
||||
except TypeError:
|
||||
# make k a tuple
|
||||
k = (k,)*ndim
|
||||
|
||||
kk = np.asarray(k, dtype=np.int32)
|
||||
data, indices, indptr = _bspl._colloc_nd(xvals, t, kk)
|
||||
return csr_array((data, indices, indptr))
|
||||
|
||||
|
||||
def _iter_solve(a, b, solver=ssl.gcrotmk, **solver_args):
|
||||
# work around iterative solvers not accepting multiple r.h.s.
|
||||
|
||||
# also work around a.dtype == float64 and b.dtype == complex128
|
||||
# cf https://github.com/scipy/scipy/issues/19644
|
||||
if np.issubdtype(b.dtype, np.complexfloating):
|
||||
real = _iter_solve(a, b.real, solver, **solver_args)
|
||||
imag = _iter_solve(a, b.imag, solver, **solver_args)
|
||||
return real + 1j*imag
|
||||
|
||||
if b.ndim == 2 and b.shape[1] != 1:
|
||||
res = np.empty_like(b)
|
||||
for j in range(b.shape[1]):
|
||||
res[:, j], info = solver(a, b[:, j], **solver_args)
|
||||
if info != 0:
|
||||
raise ValueError(f"{solver = } returns {info =} for column {j}.")
|
||||
return res
|
||||
else:
|
||||
res, info = solver(a, b, **solver_args)
|
||||
if info != 0:
|
||||
raise ValueError(f"{solver = } returns {info = }.")
|
||||
return res
|
||||
|
||||
|
||||
def make_ndbspl(points, values, k=3, *, solver=ssl.gcrotmk, **solver_args):
|
||||
"""Construct an interpolating NdBspline.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
points : tuple of ndarrays of float, with shapes (m1,), ... (mN,)
|
||||
The points defining the regular grid in N dimensions. The points in
|
||||
each dimension (i.e. every element of the `points` tuple) must be
|
||||
strictly ascending or descending.
|
||||
values : ndarray of float, shape (m1, ..., mN, ...)
|
||||
The data on the regular grid in n dimensions.
|
||||
k : int, optional
|
||||
The spline degree. Must be odd. Default is cubic, ``k=3``.
|
||||
solver : a `scipy.sparse.linalg` solver (iterative or direct), optional.
|
||||
An iterative solver from `scipy.sparse.linalg` or a direct one,
|
||||
`scipy.sparse.linalg.spsolve`.
|
||||
Used to solve the sparse linear system
|
||||
``design_matrix @ coefficients = rhs`` for the coefficients.
|
||||
Default is `scipy.sparse.linalg.gcrotmk`
|
||||
solver_args : dict, optional
|
||||
Additional arguments for the solver. The call signature is
|
||||
``solver(csr_array, rhs_vector, **solver_args)``
|
||||
|
||||
Returns
|
||||
-------
|
||||
spl : NdBSpline object
|
||||
|
||||
Notes
|
||||
-----
|
||||
Boundary conditions are not-a-knot in all dimensions.
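
Examples
--------
A minimal sketch, assuming this module-level helper is importable from
the current module (it is not necessarily part of the public API); the
grid and data are arbitrary:

>>> import numpy as np
>>> x = np.linspace(0, 1, 6)
>>> y = np.linspace(0, 1, 7)
>>> values = np.cos(x)[:, None] * np.sin(y)[None, :]
>>> spl = make_ndbspl((x, y), values, k=3)
>>> spl([[0.5, 0.5]])   # evaluate the interpolant at one point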
|
||||
"""
|
||||
ndim = len(points)
|
||||
xi_shape = tuple(len(x) for x in points)
|
||||
|
||||
try:
|
||||
len(k)
|
||||
except TypeError:
|
||||
# make k a tuple
|
||||
k = (k,)*ndim
|
||||
|
||||
for d, point in enumerate(points):
|
||||
numpts = len(np.atleast_1d(point))
|
||||
if numpts <= k[d]:
|
||||
raise ValueError(f"There are {numpts} points in dimension {d},"
|
||||
f" but order {k[d]} requires at least "
|
||||
f" {k[d]+1} points per dimension.")
|
||||
|
||||
t = tuple(_not_a_knot(np.asarray(points[d], dtype=float), k[d])
|
||||
for d in range(ndim))
|
||||
xvals = np.asarray([xv for xv in itertools.product(*points)], dtype=float)
|
||||
|
||||
# construct the collocation matrix
|
||||
matr = NdBSpline.design_matrix(xvals, t, k)
|
||||
|
||||
# Solve for the coefficients given `values`.
|
||||
# Trailing dimensions: first ndim dimensions are data, the rest are batch
|
||||
# dimensions, so stack `values` into a 2D array for `spsolve` to understand.
|
||||
v_shape = values.shape
|
||||
vals_shape = (prod(v_shape[:ndim]), prod(v_shape[ndim:]))
|
||||
vals = values.reshape(vals_shape)
|
||||
|
||||
if solver != ssl.spsolve:
|
||||
solver = functools.partial(_iter_solve, solver=solver)
|
||||
if "atol" not in solver_args:
|
||||
# avoid a DeprecationWarning, grumble grumble
|
||||
solver_args["atol"] = 1e-6
|
||||
|
||||
coef = solver(matr, vals, **solver_args)
|
||||
coef = coef.reshape(xi_shape + v_shape[ndim:])
|
||||
return NdBSpline(t, coef, k)
|
||||
|
||||
@ -0,0 +1,332 @@
|
||||
"""
|
||||
Convenience interface to N-D interpolation
|
||||
|
||||
.. versionadded:: 0.9
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
|
||||
CloughTocher2DInterpolator, _ndim_coords_from_arrays
|
||||
from scipy.spatial import cKDTree
|
||||
|
||||
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
|
||||
'CloughTocher2DInterpolator']
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Nearest-neighbor interpolation
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class NearestNDInterpolator(NDInterpolatorBase):
|
||||
"""NearestNDInterpolator(x, y).
|
||||
|
||||
Nearest-neighbor interpolator in N > 1 dimensions.
|
||||
|
||||
.. versionadded:: 0.9
|
||||
|
||||
Methods
|
||||
-------
|
||||
__call__
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : (npoints, ndims) 2-D ndarray of floats
|
||||
Data point coordinates.
|
||||
y : (npoints, ) 1-D ndarray of float or complex
|
||||
Data values.
|
||||
rescale : boolean, optional
|
||||
Rescale points to unit cube before performing interpolation.
|
||||
This is useful if some of the input dimensions have
|
||||
incommensurable units and differ by many orders of magnitude.
|
||||
|
||||
.. versionadded:: 0.14.0
|
||||
tree_options : dict, optional
|
||||
Options passed to the underlying ``cKDTree``.
|
||||
|
||||
.. versionadded:: 0.17.0
|
||||
|
||||
See Also
|
||||
--------
|
||||
griddata :
|
||||
Interpolate unstructured D-D data.
|
||||
LinearNDInterpolator :
|
||||
Piecewise linear interpolator in N dimensions.
|
||||
CloughTocher2DInterpolator :
|
||||
Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
|
||||
interpn : Interpolation on a regular grid or rectilinear grid.
|
||||
RegularGridInterpolator : Interpolator on a regular or rectilinear grid
|
||||
in arbitrary dimensions (`interpn` wraps this
|
||||
class).
|
||||
|
||||
Notes
|
||||
-----
|
||||
Uses ``scipy.spatial.cKDTree``
|
||||
|
||||
.. note:: For data on a regular grid use `interpn` instead.
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can interpolate values on a 2D plane:
|
||||
|
||||
>>> from scipy.interpolate import NearestNDInterpolator
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> x = rng.random(10) - 0.5
|
||||
>>> y = rng.random(10) - 0.5
|
||||
>>> z = np.hypot(x, y)
|
||||
>>> X = np.linspace(min(x), max(x))
|
||||
>>> Y = np.linspace(min(y), max(y))
|
||||
>>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
|
||||
>>> interp = NearestNDInterpolator(list(zip(x, y)), z)
|
||||
>>> Z = interp(X, Y)
|
||||
>>> plt.pcolormesh(X, Y, Z, shading='auto')
|
||||
>>> plt.plot(x, y, "ok", label="input point")
|
||||
>>> plt.legend()
|
||||
>>> plt.colorbar()
|
||||
>>> plt.axis("equal")
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, x, y, rescale=False, tree_options=None):
|
||||
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
|
||||
need_contiguous=False,
|
||||
need_values=False)
|
||||
if tree_options is None:
|
||||
tree_options = dict()
|
||||
self.tree = cKDTree(self.points, **tree_options)
|
||||
self.values = np.asarray(y)
|
||||
|
||||
def __call__(self, *args, **query_options):
|
||||
"""
|
||||
Evaluate interpolator at given points.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x1, x2, ... xn : array-like of float
|
||||
Points at which to interpolate data.
x1, x2, ... xn can be array-like of float with broadcastable shape,
or x1 can be array-like of float with shape ``(..., ndim)``.
|
||||
**query_options
|
||||
This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
to be passed explicitly to the cKDTree's query function.
|
||||
See `scipy.spatial.cKDTree.query` for an overview of the different options.
|
||||
|
||||
.. versionadded:: 1.12.0
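
Examples
--------
A minimal sketch of forwarding a query option to the underlying tree;
the points and values are arbitrary:

>>> import numpy as np
>>> from scipy.interpolate import NearestNDInterpolator
>>> interp = NearestNDInterpolator([[0.0, 0.0], [1.0, 1.0]], [10.0, 20.0])
>>> interp([[0.1, 0.2]])                              # nearest neighbor is (0, 0)
>>> interp([[0.1, 0.2]], distance_upper_bound=0.05)   # no neighbor that close -> nan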
|
||||
|
||||
"""
|
||||
# For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
|
||||
# some operations which are not required by NearestNDInterpolator.__call__,
|
||||
# hence here we operate on xi directly, without calling a parent class function.
|
||||
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
|
||||
xi = self._check_call_shape(xi)
|
||||
xi = self._scale_x(xi)
|
||||
|
||||
# We need to handle two important cases:
|
||||
# (1) the case where xi has trailing dimensions (..., ndim), and
|
||||
# (2) the case where y has trailing dimensions
|
||||
# We will first flatten xi to deal with case (1),
|
||||
# do the computation in flattened array while retaining y's dimensionality,
|
||||
# and then reshape the interpolated values back to match xi's shape.
|
||||
|
||||
# Flatten xi for the query
|
||||
xi_flat = xi.reshape(-1, xi.shape[-1])
|
||||
original_shape = xi.shape
|
||||
flattened_shape = xi_flat.shape
|
||||
|
||||
# If distance_upper_bound is finite, cKDTree may not find any point
# within that distance for some query points. It marks those points
# as having infinite distance, which is used below to mask the output
# array and return values only for the points that have a close
# enough neighbor.
|
||||
dist, i = self.tree.query(xi_flat, **query_options)
|
||||
valid_mask = np.isfinite(dist)
|
||||
|
||||
# create a holder interp_values array and fill with nans.
|
||||
if self.values.ndim > 1:
|
||||
interp_shape = flattened_shape[:-1] + self.values.shape[1:]
|
||||
else:
|
||||
interp_shape = flattened_shape[:-1]
|
||||
|
||||
if np.issubdtype(self.values.dtype, np.complexfloating):
|
||||
interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
|
||||
else:
|
||||
interp_values = np.full(interp_shape, np.nan)
|
||||
|
||||
interp_values[valid_mask] = self.values[i[valid_mask], ...]
|
||||
|
||||
if self.values.ndim > 1:
|
||||
new_shape = original_shape[:-1] + self.values.shape[1:]
|
||||
else:
|
||||
new_shape = original_shape[:-1]
|
||||
interp_values = interp_values.reshape(new_shape)
|
||||
|
||||
return interp_values
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Convenience interface function
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
def griddata(points, values, xi, method='linear', fill_value=np.nan,
|
||||
rescale=False):
|
||||
"""
|
||||
Interpolate unstructured D-D data.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
|
||||
Data point coordinates.
|
||||
values : ndarray of float or complex, shape (n,)
|
||||
Data values.
|
||||
xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
|
||||
Points at which to interpolate data.
|
||||
method : {'linear', 'nearest', 'cubic'}, optional
|
||||
Method of interpolation. One of
|
||||
|
||||
``nearest``
|
||||
return the value at the data point closest to
|
||||
the point of interpolation. See `NearestNDInterpolator` for
|
||||
more details.
|
||||
|
||||
``linear``
|
||||
tessellate the input point set to N-D
|
||||
simplices, and interpolate linearly on each simplex. See
|
||||
`LinearNDInterpolator` for more details.
|
||||
|
||||
``cubic`` (1-D)
|
||||
return the value determined from a cubic
|
||||
spline.
|
||||
|
||||
``cubic`` (2-D)
|
||||
return the value determined from a
|
||||
piecewise cubic, continuously differentiable (C1), and
|
||||
approximately curvature-minimizing polynomial surface. See
|
||||
`CloughTocher2DInterpolator` for more details.
|
||||
fill_value : float, optional
|
||||
Value used to fill in for requested points outside of the
|
||||
convex hull of the input points. If not provided, then the
|
||||
default is ``nan``. This option has no effect for the
|
||||
'nearest' method.
|
||||
rescale : bool, optional
|
||||
Rescale points to unit cube before performing interpolation.
|
||||
This is useful if some of the input dimensions have
|
||||
incommensurable units and differ by many orders of magnitude.
|
||||
|
||||
.. versionadded:: 0.14.0
|
||||
|
||||
Returns
|
||||
-------
|
||||
ndarray
|
||||
Array of interpolated values.
|
||||
|
||||
See Also
|
||||
--------
|
||||
LinearNDInterpolator :
|
||||
Piecewise linear interpolator in N dimensions.
|
||||
NearestNDInterpolator :
|
||||
Nearest-neighbor interpolator in N dimensions.
|
||||
CloughTocher2DInterpolator :
|
||||
Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
|
||||
interpn : Interpolation on a regular grid or rectilinear grid.
|
||||
RegularGridInterpolator : Interpolator on a regular or rectilinear grid
|
||||
in arbitrary dimensions (`interpn` wraps this
|
||||
class).
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 0.9
|
||||
|
||||
.. note:: For data on a regular grid use `interpn` instead.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Suppose we want to interpolate the 2-D function
|
||||
|
||||
>>> import numpy as np
|
||||
>>> def func(x, y):
|
||||
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
|
||||
|
||||
on a grid in [0, 1]x[0, 1]
|
||||
|
||||
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
|
||||
|
||||
but we only know its values at 1000 data points:
|
||||
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> points = rng.random((1000, 2))
|
||||
>>> values = func(points[:,0], points[:,1])
|
||||
|
||||
This can be done with `griddata` -- below we try out all of the
|
||||
interpolation methods:
|
||||
|
||||
>>> from scipy.interpolate import griddata
|
||||
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
|
||||
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
|
||||
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
|
||||
|
||||
One can see that the exact result is reproduced by all of the
|
||||
methods to some degree, but for this smooth function the piecewise
|
||||
cubic interpolant gives the best results:
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.subplot(221)
|
||||
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
|
||||
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
|
||||
>>> plt.title('Original')
|
||||
>>> plt.subplot(222)
|
||||
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
|
||||
>>> plt.title('Nearest')
|
||||
>>> plt.subplot(223)
|
||||
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
|
||||
>>> plt.title('Linear')
|
||||
>>> plt.subplot(224)
|
||||
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
|
||||
>>> plt.title('Cubic')
|
||||
>>> plt.gcf().set_size_inches(6, 6)
|
||||
>>> plt.show()
|
||||
|
||||
""" # numpy/numpydoc#87 # noqa: E501
|
||||
|
||||
points = _ndim_coords_from_arrays(points)
|
||||
|
||||
if points.ndim < 2:
|
||||
ndim = points.ndim
|
||||
else:
|
||||
ndim = points.shape[-1]
|
||||
|
||||
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
|
||||
from ._interpolate import interp1d
|
||||
points = points.ravel()
|
||||
if isinstance(xi, tuple):
|
||||
if len(xi) != 1:
|
||||
raise ValueError("invalid number of dimensions in xi")
|
||||
xi, = xi
|
||||
# Sort points/values together, necessary as input for interp1d
|
||||
idx = np.argsort(points)
|
||||
points = points[idx]
|
||||
values = values[idx]
|
||||
if method == 'nearest':
|
||||
fill_value = 'extrapolate'
|
||||
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
|
||||
fill_value=fill_value)
|
||||
return ip(xi)
|
||||
elif method == 'nearest':
|
||||
ip = NearestNDInterpolator(points, values, rescale=rescale)
|
||||
return ip(xi)
|
||||
elif method == 'linear':
|
||||
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
|
||||
rescale=rescale)
|
||||
return ip(xi)
|
||||
elif method == 'cubic' and ndim == 2:
|
||||
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
|
||||
rescale=rescale)
|
||||
return ip(xi)
|
||||
else:
|
||||
raise ValueError("Unknown interpolation method %r for "
|
||||
"%d dimensional data" % (method, ndim))
|
||||
67
venv/lib/python3.12/site-packages/scipy/interpolate/_pade.py
Normal file
@ -0,0 +1,67 @@
|
||||
from numpy import zeros, asarray, eye, poly1d, hstack, r_
|
||||
from scipy import linalg
|
||||
|
||||
__all__ = ["pade"]
|
||||
|
||||
def pade(an, m, n=None):
|
||||
"""
|
||||
Return Pade approximation to a polynomial as the ratio of two polynomials.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
an : (N,) array_like
|
||||
Taylor series coefficients.
|
||||
m : int
|
||||
The order of the returned approximating polynomial `q`.
|
||||
n : int, optional
|
||||
The order of the returned approximating polynomial `p`. By default,
|
||||
the order is ``len(an)-1-m``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
p, q : Polynomial class
|
||||
The Pade approximation of the polynomial defined by `an` is
|
||||
``p(x)/q(x)``.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from scipy.interpolate import pade
|
||||
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
|
||||
>>> p, q = pade(e_exp, 2)
|
||||
|
||||
>>> e_exp.reverse()
|
||||
>>> e_poly = np.poly1d(e_exp)
|
||||
|
||||
Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``
|
||||
|
||||
>>> e_poly(1)
|
||||
2.7166666666666668
|
||||
|
||||
>>> p(1)/q(1)
|
||||
2.7179487179487181
|
||||
|
||||
"""
|
||||
an = asarray(an)
|
||||
if n is None:
|
||||
n = len(an) - 1 - m
|
||||
if n < 0:
|
||||
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
|
||||
if n < 0:
|
||||
raise ValueError("Order of p <n> must be greater than 0.")
|
||||
N = m + n
|
||||
if N > len(an)-1:
|
||||
raise ValueError("Order of q+p <m+n> must be smaller than len(an).")
|
||||
an = an[:N+1]
|
||||
Akj = eye(N+1, n+1, dtype=an.dtype)
|
||||
Bkj = zeros((N+1, m), dtype=an.dtype)
|
||||
for row in range(1, m+1):
|
||||
Bkj[row,:row] = -(an[:row])[::-1]
|
||||
for row in range(m+1, N+1):
|
||||
Bkj[row,:] = -(an[row-m:row])[::-1]
|
||||
C = hstack((Akj, Bkj))
|
||||
pq = linalg.solve(C, an)
|
||||
p = pq[:n+1]
|
||||
q = r_[1.0, pq[n+1:]]
|
||||
return poly1d(p[::-1]), poly1d(q[::-1])
|
||||
|
||||
938
venv/lib/python3.12/site-packages/scipy/interpolate/_polyint.py
Normal file
@ -0,0 +1,938 @@
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
from scipy.special import factorial
|
||||
from scipy._lib._util import _asarray_validated, float_factorial, check_random_state
|
||||
|
||||
|
||||
__all__ = ["KroghInterpolator", "krogh_interpolate",
|
||||
"BarycentricInterpolator", "barycentric_interpolate",
|
||||
"approximate_taylor_polynomial"]
|
||||
|
||||
|
||||
def _isscalar(x):
|
||||
"""Check whether x is if a scalar type, or 0-dim"""
|
||||
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
|
||||
|
||||
|
||||
class _Interpolator1D:
|
||||
"""
|
||||
Common features in univariate interpolation
|
||||
|
||||
Deal with input data type and interpolation axis rolling. The
|
||||
actual interpolator can assume the y-data is of shape (n, r) where
|
||||
`n` is the number of x-points, and `r` the number of variables,
|
||||
and use self.dtype as the y-data type.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
_y_axis
|
||||
Axis along which the interpolation goes in the original array
|
||||
_y_extra_shape
|
||||
Additional trailing shape of the input arrays, excluding
|
||||
the interpolation axis.
|
||||
dtype
|
||||
Dtype of the y-data arrays. Can be set via _set_dtype, which
|
||||
forces it to be float or complex.
|
||||
|
||||
Methods
|
||||
-------
|
||||
__call__
|
||||
_prepare_x
|
||||
_finish_y
|
||||
_reshape_yi
|
||||
_set_yi
|
||||
_set_dtype
|
||||
_evaluate
|
||||
|
||||
"""
|
||||
|
||||
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
|
||||
|
||||
def __init__(self, xi=None, yi=None, axis=None):
|
||||
self._y_axis = axis
|
||||
self._y_extra_shape = None
|
||||
self.dtype = None
|
||||
if yi is not None:
|
||||
self._set_yi(yi, xi=xi, axis=axis)
|
||||
|
||||
def __call__(self, x):
|
||||
"""
|
||||
Evaluate the interpolant
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Point or points at which to evaluate the interpolant.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : array_like
|
||||
Interpolated values. Shape is determined by replacing
|
||||
the interpolation axis in the original array with the shape of `x`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Input values `x` must be convertible to `float` values like `int`
|
||||
or `float`.
|
||||
|
||||
"""
|
||||
x, x_shape = self._prepare_x(x)
|
||||
y = self._evaluate(x)
|
||||
return self._finish_y(y, x_shape)
|
||||
|
||||
def _evaluate(self, x):
|
||||
"""
|
||||
Actually evaluate the value of the interpolator.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _prepare_x(self, x):
|
||||
"""Reshape input x array to 1-D"""
|
||||
x = _asarray_validated(x, check_finite=False, as_inexact=True)
|
||||
x_shape = x.shape
|
||||
return x.ravel(), x_shape
|
||||
|
||||
def _finish_y(self, y, x_shape):
|
||||
"""Reshape interpolated y back to an N-D array similar to initial y"""
|
||||
y = y.reshape(x_shape + self._y_extra_shape)
|
||||
if self._y_axis != 0 and x_shape != ():
|
||||
nx = len(x_shape)
|
||||
ny = len(self._y_extra_shape)
|
||||
s = (list(range(nx, nx + self._y_axis))
|
||||
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
|
||||
y = y.transpose(s)
|
||||
return y
|
||||
|
||||
def _reshape_yi(self, yi, check=False):
|
||||
yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
|
||||
if check and yi.shape[1:] != self._y_extra_shape:
|
||||
ok_shape = "{!r} + (N,) + {!r}".format(self._y_extra_shape[-self._y_axis:],
|
||||
self._y_extra_shape[:-self._y_axis])
|
||||
raise ValueError("Data must be of shape %s" % ok_shape)
|
||||
return yi.reshape((yi.shape[0], -1))
|
||||
|
||||
def _set_yi(self, yi, xi=None, axis=None):
|
||||
if axis is None:
|
||||
axis = self._y_axis
|
||||
if axis is None:
|
||||
raise ValueError("no interpolation axis specified")
|
||||
|
||||
yi = np.asarray(yi)
|
||||
|
||||
shape = yi.shape
|
||||
if shape == ():
|
||||
shape = (1,)
|
||||
if xi is not None and shape[axis] != len(xi):
|
||||
raise ValueError("x and y arrays must be equal in length along "
|
||||
"interpolation axis.")
|
||||
|
||||
self._y_axis = (axis % yi.ndim)
|
||||
self._y_extra_shape = yi.shape[:self._y_axis] + yi.shape[self._y_axis+1:]
|
||||
self.dtype = None
|
||||
self._set_dtype(yi.dtype)
|
||||
|
||||
def _set_dtype(self, dtype, union=False):
|
||||
if np.issubdtype(dtype, np.complexfloating) \
|
||||
or np.issubdtype(self.dtype, np.complexfloating):
|
||||
self.dtype = np.complex128
|
||||
else:
|
||||
if not union or self.dtype != np.complex128:
|
||||
self.dtype = np.float64
|
||||
|
||||
|
||||
class _Interpolator1DWithDerivatives(_Interpolator1D):
|
||||
def derivatives(self, x, der=None):
|
||||
"""
|
||||
Evaluate several derivatives of the polynomial at the point `x`
|
||||
|
||||
Produce an array of derivatives evaluated at the point `x`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Point or points at which to evaluate the derivatives
|
||||
der : int or list or None, optional
|
||||
How many derivatives to evaluate, or None for all potentially
|
||||
nonzero derivatives (that is, a number equal to the number
|
||||
of points), or a list of derivatives to evaluate. This number
|
||||
includes the function value as the '0th' derivative.
|
||||
|
||||
Returns
|
||||
-------
|
||||
d : ndarray
|
||||
Array with derivatives; ``d[j]`` contains the jth derivative.
|
||||
Shape of ``d[j]`` is determined by replacing the interpolation
|
||||
axis in the original array with the shape of `x`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.interpolate import KroghInterpolator
|
||||
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
|
||||
array([1.0,2.0,3.0])
|
||||
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
|
||||
array([[1.0,1.0],
|
||||
[2.0,2.0],
|
||||
[3.0,3.0]])
|
||||
|
||||
"""
|
||||
x, x_shape = self._prepare_x(x)
|
||||
y = self._evaluate_derivatives(x, der)
|
||||
|
||||
y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
|
||||
if self._y_axis != 0 and x_shape != ():
|
||||
nx = len(x_shape)
|
||||
ny = len(self._y_extra_shape)
|
||||
s = ([0] + list(range(nx+1, nx + self._y_axis+1))
|
||||
+ list(range(1, nx+1)) +
|
||||
list(range(nx+1+self._y_axis, nx+ny+1)))
|
||||
y = y.transpose(s)
|
||||
return y
|
||||
|
||||
def derivative(self, x, der=1):
|
||||
"""
|
||||
Evaluate a single derivative of the polynomial at the point `x`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Point or points at which to evaluate the derivatives
|
||||
|
||||
der : integer, optional
|
||||
Which derivative to evaluate (default: first derivative).
|
||||
This number includes the function value as 0th derivative.
|
||||
|
||||
Returns
|
||||
-------
|
||||
d : ndarray
|
||||
Derivative interpolated at the x-points. Shape of `d` is
|
||||
determined by replacing the interpolation axis in the
|
||||
original array with the shape of `x`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
This may be computed by evaluating all derivatives up to the desired
|
||||
one (using self.derivatives()) and then discarding the rest.
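
Examples
--------
A minimal sketch; the interpolant below is the parabola ``x**2``:

>>> from scipy.interpolate import KroghInterpolator
>>> P = KroghInterpolator([0, 1, 2], [0, 1, 4])
>>> P.derivative(1.0)          # slope of x**2 at x = 1, i.e. 2
>>> P.derivative(1.0, der=2)   # second derivative, i.e. 2 everywhere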
|
||||
|
||||
"""
|
||||
x, x_shape = self._prepare_x(x)
|
||||
y = self._evaluate_derivatives(x, der+1)
|
||||
return self._finish_y(y[der], x_shape)
|
||||
|
||||
def _evaluate_derivatives(self, x, der=None):
|
||||
"""
|
||||
Actually evaluate the derivatives.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
1D array of points at which to evaluate the derivatives
|
||||
der : integer, optional
|
||||
The number of derivatives to evaluate, from 'order 0' (der=1)
|
||||
to order der-1. If omitted, return all possibly-non-zero
|
||||
derivatives, i.e., 0 to order n-1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
d : ndarray
|
||||
Array of shape ``(der, x.size, self.yi.shape[1])`` containing
|
||||
the derivatives from 0 to der-1
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class KroghInterpolator(_Interpolator1DWithDerivatives):
|
||||
"""
|
||||
Interpolating polynomial for a set of points.
|
||||
|
||||
The polynomial passes through all the pairs ``(xi, yi)``. One may
|
||||
additionally specify a number of derivatives at each point `xi`;
|
||||
this is done by repeating the value `xi` and specifying the
|
||||
derivatives as successive `yi` values.
|
||||
|
||||
Allows evaluation of the polynomial and all its derivatives.
|
||||
For reasons of numerical stability, this function does not compute
|
||||
the coefficients of the polynomial, although they can be obtained
|
||||
by evaluating all the derivatives.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : array_like, shape (npoints, )
|
||||
Known x-coordinates. Must be sorted in increasing order.
|
||||
yi : array_like, shape (..., npoints, ...)
|
||||
Known y-coordinates. When an xi occurs two or more times in
|
||||
a row, the corresponding yi's represent derivative values. The length of `yi`
|
||||
along the interpolation axis must be equal to the length of `xi`. Use the
|
||||
`axis` parameter to select the correct axis.
|
||||
axis : int, optional
|
||||
Axis in the `yi` array corresponding to the x-coordinate values. Defaults to
|
||||
``axis=0``.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Be aware that the algorithms implemented here are not necessarily
|
||||
the most numerically stable known. Moreover, even in a world of
|
||||
exact computation, unless the x coordinates are chosen very
|
||||
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
|
||||
polynomial interpolation itself is a very ill-conditioned process
|
||||
due to the Runge phenomenon. In general, even with well-chosen
|
||||
x values, degrees higher than about thirty cause problems with
|
||||
numerical instability in this code.
|
||||
|
||||
Based on [1]_.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
|
||||
and Numerical Differentiation", 1970.
|
||||
|
||||
Examples
|
||||
--------
|
||||
To produce a polynomial that is zero at 0 and 1 and has
|
||||
derivative 2 at 0, call
|
||||
|
||||
>>> from scipy.interpolate import KroghInterpolator
|
||||
>>> KroghInterpolator([0,0,1],[0,2,0])
|
||||
|
||||
This constructs the quadratic :math:`2x - 2x^2`. The derivative condition
|
||||
is indicated by the repeated zero in the `xi` array; the corresponding
|
||||
yi values are 0, the function value, and 2, the derivative value.
|
||||
|
||||
For another example, given `xi`, `yi`, and a derivative `ypi` for each
|
||||
point, appropriate arrays can be constructed as:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> xi = np.linspace(0, 1, 5)
|
||||
>>> yi, ypi = rng.random((2, 5))
|
||||
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
|
||||
>>> KroghInterpolator(xi_k, yi_k)
|
||||
|
||||
To produce a vector-valued polynomial, supply a higher-dimensional
|
||||
array for `yi`:
|
||||
|
||||
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
|
||||
|
||||
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, xi, yi, axis=0):
|
||||
super().__init__(xi, yi, axis)
|
||||
|
||||
self.xi = np.asarray(xi)
|
||||
self.yi = self._reshape_yi(yi)
|
||||
self.n, self.r = self.yi.shape
|
||||
|
||||
if (deg := self.xi.size) > 30:
|
||||
warnings.warn(f"{deg} degrees provided, degrees higher than about"
|
||||
" thirty cause problems with numerical instability "
|
||||
"with 'KroghInterpolator'", stacklevel=2)
|
||||
|
||||
c = np.zeros((self.n+1, self.r), dtype=self.dtype)
|
||||
c[0] = self.yi[0]
|
||||
Vk = np.zeros((self.n, self.r), dtype=self.dtype)
|
||||
for k in range(1, self.n):
|
||||
s = 0
|
||||
while s <= k and xi[k-s] == xi[k]:
|
||||
s += 1
|
||||
s -= 1
|
||||
Vk[0] = self.yi[k]/float_factorial(s)
|
||||
for i in range(k-s):
|
||||
if xi[i] == xi[k]:
|
||||
raise ValueError("Elements of `xi` can't be equal.")
|
||||
if s == 0:
|
||||
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
|
||||
else:
|
||||
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
|
||||
c[k] = Vk[k-s]
|
||||
self.c = c
|
||||
|
||||
def _evaluate(self, x):
|
||||
pi = 1
|
||||
p = np.zeros((len(x), self.r), dtype=self.dtype)
|
||||
p += self.c[0,np.newaxis,:]
|
||||
for k in range(1, self.n):
|
||||
w = x - self.xi[k-1]
|
||||
pi = w*pi
|
||||
p += pi[:,np.newaxis] * self.c[k]
|
||||
return p
|
||||
|
||||
def _evaluate_derivatives(self, x, der=None):
|
||||
n = self.n
|
||||
r = self.r
|
||||
|
||||
if der is None:
|
||||
der = self.n
|
||||
|
||||
pi = np.zeros((n, len(x)))
|
||||
w = np.zeros((n, len(x)))
|
||||
pi[0] = 1
|
||||
p = np.zeros((len(x), self.r), dtype=self.dtype)
|
||||
p += self.c[0, np.newaxis, :]
|
||||
|
||||
for k in range(1, n):
|
||||
w[k-1] = x - self.xi[k-1]
|
||||
pi[k] = w[k-1] * pi[k-1]
|
||||
p += pi[k, :, np.newaxis] * self.c[k]
|
||||
|
||||
cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
|
||||
cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
|
||||
cn[0] = p
|
||||
for k in range(1, n):
|
||||
for i in range(1, n-k+1):
|
||||
pi[i] = w[k+i-1]*pi[i-1] + pi[i]
|
||||
cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
|
||||
cn[k] *= float_factorial(k)
|
||||
|
||||
cn[n, :, :] = 0
|
||||
return cn[:der]
|
||||
|
||||
|
||||
def krogh_interpolate(xi, yi, x, der=0, axis=0):
|
||||
"""
|
||||
Convenience function for polynomial interpolation.
|
||||
|
||||
See `KroghInterpolator` for more details.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : array_like
|
||||
Interpolation points (known x-coordinates).
|
||||
yi : array_like
|
||||
Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
|
||||
vectors of length R, or scalars if R=1.
|
||||
x : array_like
|
||||
Point or points at which to evaluate the derivatives.
|
||||
der : int or list or None, optional
|
||||
How many derivatives to evaluate, or None for all potentially
|
||||
nonzero derivatives (that is, a number equal to the number
|
||||
of points), or a list of derivatives to evaluate. This number
|
||||
includes the function value as the '0th' derivative.
|
||||
axis : int, optional
|
||||
Axis in the `yi` array corresponding to the x-coordinate values.
|
||||
|
||||
Returns
|
||||
-------
|
||||
d : ndarray
|
||||
If the interpolator's values are R-D then the
returned array will have shape ``(number of derivatives, N, R)``.
|
||||
If `x` is a scalar, the middle dimension will be dropped; if
|
||||
the `yi` are scalars then the last dimension will be dropped.
|
||||
|
||||
See Also
|
||||
--------
|
||||
KroghInterpolator : Krogh interpolator
|
||||
|
||||
Notes
|
||||
-----
|
||||
Construction of the interpolating polynomial is a relatively expensive
|
||||
process. If you want to evaluate it repeatedly consider using the class
|
||||
KroghInterpolator (which is what this function uses).
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can interpolate 2D observed data using Krogh interpolation:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import krogh_interpolate
|
||||
>>> x_observed = np.linspace(0.0, 10.0, 11)
|
||||
>>> y_observed = np.sin(x_observed)
|
||||
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
|
||||
>>> y = krogh_interpolate(x_observed, y_observed, x)
|
||||
>>> plt.plot(x_observed, y_observed, "o", label="observation")
|
||||
>>> plt.plot(x, y, label="krogh interpolation")
|
||||
>>> plt.legend()
|
||||
>>> plt.show()
|
||||
"""
|
||||
|
||||
P = KroghInterpolator(xi, yi, axis=axis)
|
||||
if der == 0:
|
||||
return P(x)
|
||||
elif _isscalar(der):
|
||||
return P.derivative(x, der=der)
|
||||
else:
|
||||
return P.derivatives(x, der=np.amax(der)+1)[der]
|
||||
|
||||
|
||||
def approximate_taylor_polynomial(f, x, degree, scale, order=None):
|
||||
"""
|
||||
Estimate the Taylor polynomial of f at x by polynomial fitting.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
f : callable
|
||||
The function whose Taylor polynomial is sought. Should accept
|
||||
a vector of `x` values.
|
||||
x : scalar
|
||||
The point at which the polynomial is to be evaluated.
|
||||
degree : int
|
||||
The degree of the Taylor polynomial
|
||||
scale : scalar
|
||||
The width of the interval to use to evaluate the Taylor polynomial.
|
||||
Function values spread over a range this wide are used to fit the
|
||||
polynomial. Must be chosen carefully.
|
||||
order : int or None, optional
|
||||
The order of the polynomial to be used in the fitting; `f` will be
|
||||
evaluated ``order+1`` times. If None, use `degree`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
p : poly1d instance
|
||||
The Taylor polynomial (translated to the origin, so that
|
||||
for example p(0)=f(x)).
|
||||
|
||||
Notes
|
||||
-----
|
||||
The appropriate choice of "scale" is a trade-off; too large and the
|
||||
function differs from its Taylor polynomial too much to get a good
|
||||
answer, too small and round-off errors overwhelm the higher-order terms.
|
||||
The algorithm used becomes numerically unstable around order 30 even
|
||||
under ideal circumstances.
|
||||
|
||||
Choosing order somewhat larger than degree may improve the higher-order
|
||||
terms.
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can calculate Taylor approximation polynomials of the sin function with
|
||||
various degrees:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import approximate_taylor_polynomial
|
||||
>>> x = np.linspace(-10.0, 10.0, num=100)
|
||||
>>> plt.plot(x, np.sin(x), label="sin curve")
|
||||
>>> for degree in np.arange(1, 15, step=2):
|
||||
... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
|
||||
... order=degree + 2)
|
||||
... plt.plot(x, sin_taylor(x), label=f"degree={degree}")
|
||||
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
|
||||
... borderaxespad=0.0, shadow=True)
|
||||
>>> plt.tight_layout()
|
||||
>>> plt.axis([-10, 10, -10, 10])
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
if order is None:
|
||||
order = degree
|
||||
|
||||
n = order+1
|
||||
# Choose n points that cluster near the endpoints of the interval in
|
||||
# a way that avoids the Runge phenomenon. Ensure, by including the
|
||||
# endpoint or not as appropriate, that one point always falls at x
|
||||
# exactly.
|
||||
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
|
||||
|
||||
P = KroghInterpolator(xs, f(xs))
|
||||
d = P.derivatives(x,der=degree+1)
|
||||
|
||||
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
|
||||
|
||||
|
||||
class BarycentricInterpolator(_Interpolator1DWithDerivatives):
|
||||
r"""Interpolating polynomial for a set of points.
|
||||
|
||||
Constructs a polynomial that passes through a given set of points.
|
||||
Allows evaluation of the polynomial and all its derivatives,
|
||||
efficient changing of the y-values to be interpolated,
|
||||
and updating by adding more x- and y-values.
|
||||
|
||||
For reasons of numerical stability, this function does not compute
|
||||
the coefficients of the polynomial.
|
||||
|
||||
The values `yi` need to be provided before the function is
|
||||
evaluated, but none of the preprocessing depends on them, so rapid
|
||||
updates are possible.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : array_like, shape (npoints, )
|
||||
1-D array of x coordinates of the points the polynomial
|
||||
should pass through
|
||||
yi : array_like, shape (..., npoints, ...), optional
|
||||
N-D array of y coordinates of the points the polynomial should pass through.
|
||||
If None, the y values will be supplied later via the `set_y` method.
|
||||
The length of `yi` along the interpolation axis must be equal to the length
|
||||
of `xi`. Use the ``axis`` parameter to select correct axis.
|
||||
axis : int, optional
|
||||
Axis in the yi array corresponding to the x-coordinate values. Defaults
|
||||
to ``axis=0``.
|
||||
wi : array_like, optional
|
||||
The barycentric weights for the chosen interpolation points `xi`.
|
||||
If absent or None, the weights will be computed from `xi` (default).
|
||||
This allows for the reuse of the weights `wi` if several interpolants
|
||||
are being calculated using the same nodes `xi`, without re-computation.
|
||||
random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
|
||||
If `random_state` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
|
||||
|
||||
Notes
|
||||
-----
|
||||
This class uses a "barycentric interpolation" method that treats
|
||||
the problem as a special case of rational function interpolation.
|
||||
This algorithm is quite stable, numerically, but even in a world of
|
||||
exact computation, unless the x coordinates are chosen very
|
||||
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
|
||||
polynomial interpolation itself is a very ill-conditioned process
|
||||
due to the Runge phenomenon.
|
||||
|
||||
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
|
||||
|
||||
Examples
|
||||
--------
|
||||
To produce a quintic barycentric interpolant approximating the function
|
||||
:math:`\sin x`, and its first four derivatives, using six randomly-spaced
|
||||
nodes in :math:`(0, \frac{\pi}{2})`:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import BarycentricInterpolator
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> xi = rng.random(6) * np.pi/2
|
||||
>>> f, f_d1, f_d2, f_d3, f_d4 = np.sin, np.cos, lambda x: -np.sin(x), lambda x: -np.cos(x), np.sin
|
||||
>>> P = BarycentricInterpolator(xi, f(xi), random_state=rng)
|
||||
>>> fig, axs = plt.subplots(5, 1, sharex=True, layout='constrained', figsize=(7,10))
|
||||
>>> x = np.linspace(0, np.pi, 100)
|
||||
>>> axs[0].plot(x, P(x), 'r:', x, f(x), 'k--', xi, f(xi), 'xk')
|
||||
>>> axs[1].plot(x, P.derivative(x), 'r:', x, f_d1(x), 'k--', xi, f_d1(xi), 'xk')
|
||||
>>> axs[2].plot(x, P.derivative(x, 2), 'r:', x, f_d2(x), 'k--', xi, f_d2(xi), 'xk')
|
||||
>>> axs[3].plot(x, P.derivative(x, 3), 'r:', x, f_d3(x), 'k--', xi, f_d3(xi), 'xk')
|
||||
>>> axs[4].plot(x, P.derivative(x, 4), 'r:', x, f_d4(x), 'k--', xi, f_d4(xi), 'xk')
|
||||
>>> axs[0].set_xlim(0, np.pi)
|
||||
>>> axs[4].set_xlabel(r"$x$")
|
||||
>>> axs[4].set_xticks([i * np.pi / 4 for i in range(5)],
|
||||
... ["0", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"])
|
||||
>>> axs[0].set_ylabel("$f(x)$")
|
||||
>>> axs[1].set_ylabel("$f'(x)$")
|
||||
>>> axs[2].set_ylabel("$f''(x)$")
|
||||
>>> axs[3].set_ylabel("$f^{(3)}(x)$")
|
||||
>>> axs[4].set_ylabel("$f^{(4)}(x)$")
|
||||
>>> labels = ['Interpolation nodes', 'True function $f$', 'Barycentric interpolation']
|
||||
>>> axs[0].legend(axs[0].get_lines()[::-1], labels, bbox_to_anchor=(0., 1.02, 1., .102),
|
||||
... loc='lower left', ncols=3, mode="expand", borderaxespad=0., frameon=False)
|
||||
>>> plt.show()
|
||||
""" # numpy/numpydoc#87 # noqa: E501
|
||||
|
||||
def __init__(self, xi, yi=None, axis=0, *, wi=None, random_state=None):
|
||||
super().__init__(xi, yi, axis)
|
||||
|
||||
random_state = check_random_state(random_state)
|
||||
|
||||
self.xi = np.asarray(xi, dtype=np.float64)
|
||||
self.set_yi(yi)
|
||||
self.n = len(self.xi)
|
||||
|
||||
# cache derivative object to avoid re-computing the weights with every call.
|
||||
self._diff_cij = None
|
||||
|
||||
if wi is not None:
|
||||
self.wi = wi
|
||||
else:
|
||||
# See page 510 of Berrut and Trefethen 2004 for an explanation of the
|
||||
# capacity scaling and the suggestion of using a random permutation of
|
||||
# the input factors.
|
||||
# At the moment, the permutation is not performed for xi that are
|
||||
# appended later through the add_xi interface. It's not clear to me how
|
||||
# to implement that and it seems that most situations that require
|
||||
# these numerical stability improvements will be able to provide all
|
||||
# the points to the constructor.
|
||||
self._inv_capacity = 4.0 / (np.max(self.xi) - np.min(self.xi))
|
||||
permute = random_state.permutation(self.n, )
|
||||
inv_permute = np.zeros(self.n, dtype=np.int32)
|
||||
inv_permute[permute] = np.arange(self.n)
|
||||
self.wi = np.zeros(self.n)
|
||||
|
||||
for i in range(self.n):
|
||||
dist = self._inv_capacity * (self.xi[i] - self.xi[permute])
|
||||
dist[inv_permute[i]] = 1.0
|
||||
prod = np.prod(dist)
|
||||
if prod == 0.0:
|
||||
raise ValueError("Interpolation points xi must be"
|
||||
" distinct.")
|
||||
self.wi[i] = 1.0 / prod
|
||||
|
||||
def set_yi(self, yi, axis=None):
|
||||
"""
|
||||
Update the y values to be interpolated
|
||||
|
||||
The barycentric interpolation algorithm requires the calculation
|
||||
of weights, but these depend only on the `xi`. The `yi` can be changed
|
||||
at any time.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
yi : array_like
|
||||
The y-coordinates of the points the polynomial will pass through.
|
||||
If None, the y values must be supplied later.
|
||||
axis : int, optional
|
||||
Axis in the `yi` array corresponding to the x-coordinate values.
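
Examples
--------
A small sketch of reusing the precomputed weights with new data; the
nodes and values are arbitrary:

>>> import numpy as np
>>> from scipy.interpolate import BarycentricInterpolator
>>> xi = np.array([0.0, 1.0, 2.0])
>>> P = BarycentricInterpolator(xi, xi**2)
>>> P.set_yi(xi**3)   # same nodes, new y values; the weights are not recomputed
>>> P(2.0)            # the interpolant now passes through (2, 8)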
|
||||
|
||||
"""
|
||||
if yi is None:
|
||||
self.yi = None
|
||||
return
|
||||
self._set_yi(yi, xi=self.xi, axis=axis)
|
||||
self.yi = self._reshape_yi(yi)
|
||||
self.n, self.r = self.yi.shape
|
||||
self._diff_baryint = None
|
||||
|
||||
def add_xi(self, xi, yi=None):
|
||||
"""
|
||||
Add more x values to the set to be interpolated
|
||||
|
||||
The barycentric interpolation algorithm allows easy updating by
|
||||
adding more points for the polynomial to pass through.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : array_like
|
||||
The x coordinates of the points that the polynomial should pass
|
||||
through.
|
||||
yi : array_like, optional
|
||||
The y coordinates of the points the polynomial should pass through.
|
||||
Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
|
||||
vector-valued.
|
||||
If `yi` is not given, the y values will be supplied later. `yi`
|
||||
should be given if and only if the interpolator has y values
|
||||
specified.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The new points added by `add_xi` are not randomly permuted
|
||||
so there is potential for numerical instability,
|
||||
especially for a large number of points. If this
|
||||
happens, please reconstruct interpolation from scratch instead.
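
Examples
--------
A short sketch of growing an interpolant in place; the points are
arbitrary:

>>> import numpy as np
>>> from scipy.interpolate import BarycentricInterpolator
>>> P = BarycentricInterpolator([0.0, 1.0], [0.0, 1.0])
>>> P.add_xi([2.0], [4.0])   # now the parabola through (0, 0), (1, 1), (2, 4)
>>> P(1.5)                   # evaluates the updated interpolant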
|
||||
"""
|
||||
if yi is not None:
|
||||
if self.yi is None:
|
||||
raise ValueError("No previous yi value to update!")
|
||||
yi = self._reshape_yi(yi, check=True)
|
||||
self.yi = np.vstack((self.yi,yi))
|
||||
else:
|
||||
if self.yi is not None:
|
||||
raise ValueError("No update to yi provided!")
|
||||
old_n = self.n
|
||||
self.xi = np.concatenate((self.xi,xi))
|
||||
self.n = len(self.xi)
|
||||
self.wi **= -1
|
||||
old_wi = self.wi
|
||||
self.wi = np.zeros(self.n)
|
||||
self.wi[:old_n] = old_wi
|
||||
for j in range(old_n, self.n):
|
||||
self.wi[:j] *= self._inv_capacity * (self.xi[j]-self.xi[:j])
|
||||
self.wi[j] = np.multiply.reduce(
|
||||
self._inv_capacity * (self.xi[:j]-self.xi[j])
|
||||
)
|
||||
self.wi **= -1
|
||||
self._diff_cij = None
|
||||
self._diff_baryint = None
|
||||
|
||||
def __call__(self, x):
|
||||
"""Evaluate the interpolating polynomial at the points x
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Point or points at which to evaluate the interpolant.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : array_like
|
||||
Interpolated values. Shape is determined by replacing
|
||||
the interpolation axis in the original array with the shape of `x`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Currently the code computes an outer product between `x` and the
|
||||
weights, that is, it constructs an intermediate array of size
|
||||
``(N, len(x))``, where N is the degree of the polynomial.
|
||||
"""
|
||||
return _Interpolator1D.__call__(self, x)
|
||||
|
||||
def _evaluate(self, x):
|
||||
if x.size == 0:
|
||||
p = np.zeros((0, self.r), dtype=self.dtype)
|
||||
else:
|
||||
c = x[..., np.newaxis] - self.xi
|
||||
z = c == 0
|
||||
c[z] = 1
|
||||
c = self.wi / c
|
||||
with np.errstate(divide='ignore'):
|
||||
p = np.dot(c, self.yi) / np.sum(c, axis=-1)[..., np.newaxis]
|
||||
# Now fix where x==some xi
|
||||
r = np.nonzero(z)
|
||||
if len(r) == 1: # evaluation at a scalar
|
||||
if len(r[0]) > 0: # equals one of the points
|
||||
p = self.yi[r[0][0]]
|
||||
else:
|
||||
p[r[:-1]] = self.yi[r[-1]]
|
||||
return p
|
||||
|
||||
def derivative(self, x, der=1):
|
||||
"""
|
||||
Evaluate a single derivative of the polynomial at the point x.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
Point or points at which to evaluate the derivatives
|
||||
der : integer, optional
|
||||
Which derivative to evaluate (default: first derivative).
|
||||
This number includes the function value as 0th derivative.
|
||||
|
||||
Returns
|
||||
-------
|
||||
d : ndarray
|
||||
Derivative interpolated at the x-points. Shape of `d` is
|
||||
determined by replacing the interpolation axis in the
|
||||
original array with the shape of `x`.
|
||||
"""
|
||||
x, x_shape = self._prepare_x(x)
|
||||
y = self._evaluate_derivatives(x, der+1, all_lower=False)
|
||||
return self._finish_y(y, x_shape)
|
||||
|
||||
def _evaluate_derivatives(self, x, der=None, all_lower=True):
|
||||
# NB: der here is not the order of the highest derivative;
|
||||
# instead, it is the size of the derivatives matrix that
|
||||
# would be returned with all_lower=True, including the
|
||||
# '0th' derivative (the undifferentiated function).
|
||||
# E.g. to evaluate the 5th derivative alone, call
|
||||
# _evaluate_derivatives(x, der=6, all_lower=False).
|
||||
|
||||
if (not all_lower) and (x.size == 0 or self.r == 0):
|
||||
return np.zeros((0, self.r), dtype=self.dtype)
|
||||
|
||||
if (not all_lower) and der == 1:
|
||||
return self._evaluate(x)
|
||||
|
||||
if (not all_lower) and (der > self.n):
|
||||
return np.zeros((len(x), self.r), dtype=self.dtype)
|
||||
|
||||
if der is None:
|
||||
der = self.n
|
||||
|
||||
if all_lower and (x.size == 0 or self.r == 0):
|
||||
return np.zeros((der, len(x), self.r), dtype=self.dtype)
|
||||
|
||||
if self._diff_cij is None:
|
||||
# c[i,j] = xi[i] - xi[j]
|
||||
c = self.xi[:, np.newaxis] - self.xi
|
||||
|
||||
# avoid division by 0 (diagonal entries are so far zero by construction)
|
||||
np.fill_diagonal(c, 1)
|
||||
|
||||
# c[i,j] = (w[j] / w[i]) / (xi[i] - xi[j]) (equation 9.4)
|
||||
c = self.wi / (c * self.wi[..., np.newaxis])
|
||||
|
||||
# fill in correct diagonal entries: each column sums to 0
|
||||
np.fill_diagonal(c, 0)
|
||||
|
||||
# calculate diagonal
|
||||
# c[j,j] = -sum_{i != j} c[i,j] (equation 9.5)
|
||||
d = -c.sum(axis=1)
|
||||
# c[i,j] = l_j(x_i)
|
||||
np.fill_diagonal(c, d)
|
||||
|
||||
self._diff_cij = c
|
||||
|
||||
if self._diff_baryint is None:
|
||||
# initialise and cache derivative interpolator and cijs;
|
||||
# reuse weights wi (which depend only on interpolation points xi),
|
||||
# to avoid unnecessary re-computation
|
||||
self._diff_baryint = BarycentricInterpolator(xi=self.xi,
|
||||
yi=self._diff_cij @ self.yi,
|
||||
wi=self.wi)
|
||||
self._diff_baryint._diff_cij = self._diff_cij
|
||||
|
||||
if all_lower:
|
||||
# assemble matrix of derivatives from order 0 to order der-1,
|
||||
# in the format required by _Interpolator1DWithDerivatives.
|
||||
cn = np.zeros((der, len(x), self.r), dtype=self.dtype)
|
||||
for d in range(der):
|
||||
cn[d, :, :] = self._evaluate_derivatives(x, d+1, all_lower=False)
|
||||
return cn
|
||||
|
||||
# recursively evaluate only the derivative requested
|
||||
return self._diff_baryint._evaluate_derivatives(x, der-1, all_lower=False)
|
||||
|
||||
|
||||
def barycentric_interpolate(xi, yi, x, axis=0, *, der=0):
|
||||
"""
|
||||
Convenience function for polynomial interpolation.
|
||||
|
||||
Constructs a polynomial that passes through a given set of points,
|
||||
then evaluates the polynomial. For reasons of numerical stability,
|
||||
this function does not compute the coefficients of the polynomial.
|
||||
|
||||
This function uses a "barycentric interpolation" method that treats
|
||||
the problem as a special case of rational function interpolation.
|
||||
This algorithm is quite stable, numerically, but even in a world of
|
||||
exact computation, unless the `x` coordinates are chosen very
|
||||
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
|
||||
polynomial interpolation itself is a very ill-conditioned process
|
||||
due to the Runge phenomenon.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : array_like
|
||||
1-D array of x coordinates of the points the polynomial should
|
||||
pass through
|
||||
yi : array_like
|
||||
The y coordinates of the points the polynomial should pass through.
|
||||
x : scalar or array_like
|
||||
Point or points at which to evaluate the interpolant.
|
||||
der : int or list or None, optional
|
||||
How many derivatives to evaluate, or None for all potentially
|
||||
nonzero derivatives (that is, a number equal to the number
|
||||
of points), or a list of derivatives to evaluate. This number
|
||||
includes the function value as the '0th' derivative.
|
||||
axis : int, optional
|
||||
Axis in the `yi` array corresponding to the x-coordinate values.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : scalar or array_like
|
||||
Interpolated values. Shape is determined by replacing
|
||||
the interpolation axis in the original array with the shape of `x`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
BarycentricInterpolator : Barycentric interpolator
|
||||
|
||||
Notes
|
||||
-----
|
||||
Construction of the interpolation weights is a relatively slow process.
|
||||
If you want to call this many times with the same xi (but possibly
|
||||
varying yi or x) you should use the class `BarycentricInterpolator`.
|
||||
This is what this function uses internally.
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can interpolate 2D observed data using barycentric interpolation:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import barycentric_interpolate
|
||||
>>> x_observed = np.linspace(0.0, 10.0, 11)
|
||||
>>> y_observed = np.sin(x_observed)
|
||||
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
|
||||
>>> y = barycentric_interpolate(x_observed, y_observed, x)
|
||||
>>> plt.plot(x_observed, y_observed, "o", label="observation")
|
||||
>>> plt.plot(x, y, label="barycentric interpolation")
|
||||
>>> plt.legend()
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
P = BarycentricInterpolator(xi, yi, axis=axis)
|
||||
if der == 0:
|
||||
return P(x)
|
||||
elif _isscalar(der):
|
||||
return P.derivative(x, der=der)
|
||||
else:
|
||||
return P.derivatives(x, der=np.amax(der)+1)[der]
|
||||
Binary file not shown.
290
venv/lib/python3.12/site-packages/scipy/interpolate/_rbf.py
Normal file
@ -0,0 +1,290 @@
|
||||
"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
|
||||
|
||||
Written by John Travers <jtravs@gmail.com>, February 2007
|
||||
Based closely on Matlab code by Alex Chirokov
|
||||
Additional, large, improvements by Robert Hetland
|
||||
Some additional alterations by Travis Oliphant
|
||||
Interpolation with multi-dimensional target domain by Josua Sassen
|
||||
|
||||
Permission to use, modify, and distribute this software is given under the
|
||||
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
|
||||
this distribution for specifics.
|
||||
|
||||
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
|
||||
|
||||
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
|
||||
Copyright (c) 2007, John Travers <jtravs@gmail.com>
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials provided
|
||||
with the distribution.
|
||||
|
||||
* Neither the name of Robert Hetland nor the names of any
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
from scipy import linalg
|
||||
from scipy.special import xlogy
|
||||
from scipy.spatial.distance import cdist, pdist, squareform
|
||||
|
||||
__all__ = ['Rbf']
|
||||
|
||||
|
||||
class Rbf:
|
||||
"""
|
||||
Rbf(*args, **kwargs)
|
||||
|
||||
A class for radial basis function interpolation of functions from
|
||||
N-D scattered data to an M-D domain.
|
||||
|
||||
.. legacy:: class
|
||||
|
||||
`Rbf` is legacy code, for new usage please use `RBFInterpolator`
|
||||
instead.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
*args : arrays
|
||||
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
|
||||
and d is the array of values at the nodes
|
||||
function : str or callable, optional
|
||||
The radial basis function, based on the radius, r, given by the norm
|
||||
(default is Euclidean distance); the default is 'multiquadric'::
|
||||
|
||||
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
|
||||
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
|
||||
'gaussian': exp(-(r/self.epsilon)**2)
|
||||
'linear': r
|
||||
'cubic': r**3
|
||||
'quintic': r**5
|
||||
'thin_plate': r**2 * log(r)
|
||||
|
||||
If callable, then it must take 2 arguments (self, r). The epsilon
|
||||
parameter will be available as self.epsilon. Other keyword
|
||||
arguments passed in will be available as well.
|
||||
|
||||
epsilon : float, optional
|
||||
Adjustable constant for the gaussian or multiquadric functions;
defaults to the approximate average distance between nodes (which is
a good starting value).
|
||||
smooth : float, optional
|
||||
Values greater than zero increase the smoothness of the
|
||||
approximation. 0 is for interpolation (default), in which case the
function will always go through the nodal points.
|
||||
norm : str, callable, optional
|
||||
A function that returns the 'distance' between two points, with
|
||||
inputs as arrays of positions (x, y, z, ...), and an output as an
|
||||
array of distances. E.g., the default: 'euclidean', such that the result
|
||||
is a matrix of the distances from each point in ``x1`` to each point in
|
||||
``x2``. For more options, see documentation of
|
||||
`scipy.spatial.distance.cdist`.
|
||||
mode : str, optional
|
||||
Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
|
||||
'1-D' the data `d` will be considered as 1-D and flattened
|
||||
internally. When it is 'N-D' the data `d` is assumed to be an array of
|
||||
shape (n_samples, m), where m is the dimension of the target domain.
|
||||
|
||||
|
||||
Attributes
|
||||
----------
|
||||
N : int
|
||||
The number of data points (as determined by the input arrays).
|
||||
di : ndarray
|
||||
The 1-D array of data values at each of the data coordinates `xi`.
|
||||
xi : ndarray
|
||||
The 2-D array of data coordinates.
|
||||
function : str or callable
|
||||
The radial basis function. See description under Parameters.
|
||||
epsilon : float
|
||||
Parameter used by the gaussian or multiquadric functions. See Parameters.
|
||||
smooth : float
|
||||
Smoothing parameter. See description under Parameters.
|
||||
norm : str or callable
|
||||
The distance function. See description under Parameters.
|
||||
mode : str
|
||||
Mode of the interpolation. See description under Parameters.
|
||||
nodes : ndarray
|
||||
A 1-D array of node values for the interpolation.
|
||||
A : internal property, do not use
|
||||
|
||||
See Also
|
||||
--------
|
||||
RBFInterpolator
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from scipy.interpolate import Rbf
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> x, y, z, d = rng.random((4, 50))
|
||||
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
|
||||
>>> xi = yi = zi = np.linspace(0, 1, 20)
|
||||
>>> di = rbfi(xi, yi, zi) # interpolated values
|
||||
>>> di.shape
|
||||
(20,)
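
A different radial basis function and a non-zero smoothing term can be
selected through the `function` and `smooth` parameters documented above;
a minimal sketch:

>>> rbfi_smooth = Rbf(x, y, z, d, function='gaussian', smooth=0.1)
>>> di_smooth = rbfi_smooth(xi, yi, zi)
>>> di_smooth.shape
(20,)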
|
||||
|
||||
"""
|
||||
# Available radial basis functions that can be selected as strings;
|
||||
# they all start with _h_ (self._init_function relies on that)
|
||||
def _h_multiquadric(self, r):
|
||||
return np.sqrt((1.0/self.epsilon*r)**2 + 1)
|
||||
|
||||
def _h_inverse_multiquadric(self, r):
|
||||
return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
|
||||
|
||||
def _h_gaussian(self, r):
|
||||
return np.exp(-(1.0/self.epsilon*r)**2)
|
||||
|
||||
def _h_linear(self, r):
|
||||
return r
|
||||
|
||||
def _h_cubic(self, r):
|
||||
return r**3
|
||||
|
||||
def _h_quintic(self, r):
|
||||
return r**5
|
||||
|
||||
def _h_thin_plate(self, r):
|
||||
return xlogy(r**2, r)
|
||||
|
||||
# Setup self._function and do smoke test on initial r
|
||||
def _init_function(self, r):
|
||||
if isinstance(self.function, str):
|
||||
self.function = self.function.lower()
|
||||
_mapped = {'inverse': 'inverse_multiquadric',
|
||||
'inverse multiquadric': 'inverse_multiquadric',
|
||||
'thin-plate': 'thin_plate'}
|
||||
if self.function in _mapped:
|
||||
self.function = _mapped[self.function]
|
||||
|
||||
func_name = "_h_" + self.function
|
||||
if hasattr(self, func_name):
|
||||
self._function = getattr(self, func_name)
|
||||
else:
|
||||
functionlist = [x[3:] for x in dir(self)
|
||||
if x.startswith('_h_')]
|
||||
raise ValueError("function must be a callable or one of " +
|
||||
", ".join(functionlist))
|
||||
self._function = getattr(self, "_h_"+self.function)
|
||||
elif callable(self.function):
|
||||
allow_one = False
|
||||
if hasattr(self.function, 'func_code') or \
|
||||
hasattr(self.function, '__code__'):
|
||||
val = self.function
|
||||
allow_one = True
|
||||
elif hasattr(self.function, "__call__"):
|
||||
val = self.function.__call__.__func__
|
||||
else:
|
||||
raise ValueError("Cannot determine number of arguments to "
|
||||
"function")
|
||||
|
||||
argcount = val.__code__.co_argcount
|
||||
if allow_one and argcount == 1:
|
||||
self._function = self.function
|
||||
elif argcount == 2:
|
||||
self._function = self.function.__get__(self, Rbf)
|
||||
else:
|
||||
raise ValueError("Function argument must take 1 or 2 "
|
||||
"arguments.")
|
||||
|
||||
a0 = self._function(r)
|
||||
if a0.shape != r.shape:
|
||||
raise ValueError("Callable must take array and return array of "
|
||||
"the same shape")
|
||||
return a0
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# `args` can be a variable number of arrays; we flatten them and store
|
||||
# them as a single 2-D array `xi` of shape (n_args-1, array_size),
|
||||
# plus a 1-D array `di` for the values.
|
||||
# All arrays must have the same number of elements
|
||||
self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
|
||||
for a in args[:-1]])
|
||||
self.N = self.xi.shape[-1]
|
||||
|
||||
self.mode = kwargs.pop('mode', '1-D')
|
||||
|
||||
if self.mode == '1-D':
|
||||
self.di = np.asarray(args[-1]).flatten()
|
||||
self._target_dim = 1
|
||||
elif self.mode == 'N-D':
|
||||
self.di = np.asarray(args[-1])
|
||||
self._target_dim = self.di.shape[-1]
|
||||
else:
|
||||
raise ValueError("Mode has to be 1-D or N-D.")
|
||||
|
||||
if not all([x.size == self.di.shape[0] for x in self.xi]):
|
||||
raise ValueError("All arrays must be equal length.")
|
||||
|
||||
self.norm = kwargs.pop('norm', 'euclidean')
|
||||
self.epsilon = kwargs.pop('epsilon', None)
|
||||
if self.epsilon is None:
|
||||
# default epsilon is "the average distance between nodes" based
|
||||
# on a bounding hypercube
|
||||
ximax = np.amax(self.xi, axis=1)
|
||||
ximin = np.amin(self.xi, axis=1)
|
||||
edges = ximax - ximin
|
||||
edges = edges[np.nonzero(edges)]
|
||||
self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
|
||||
|
||||
self.smooth = kwargs.pop('smooth', 0.0)
|
||||
self.function = kwargs.pop('function', 'multiquadric')
|
||||
|
||||
# attach anything left in kwargs to self for use by any user-callable
|
||||
# function or to save on the object returned.
|
||||
for item, value in kwargs.items():
|
||||
setattr(self, item, value)
|
||||
|
||||
# Compute weights
|
||||
if self._target_dim > 1: # If we have more than one target dimension,
|
||||
# we first factorize the matrix
|
||||
self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
|
||||
lu, piv = linalg.lu_factor(self.A)
|
||||
for i in range(self._target_dim):
|
||||
self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
|
||||
else:
|
||||
self.nodes = linalg.solve(self.A, self.di)
|
||||
|
||||
@property
|
||||
def A(self):
|
||||
# this only exists for backwards compatibility: self.A was available
|
||||
# and, at least technically, public.
|
||||
r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
|
||||
return self._init_function(r) - np.eye(self.N)*self.smooth
|
||||
|
||||
def _call_norm(self, x1, x2):
|
||||
return cdist(x1.T, x2.T, self.norm)
|
||||
|
||||
def __call__(self, *args):
|
||||
args = [np.asarray(x) for x in args]
|
||||
if not all([x.shape == y.shape for x in args for y in args]):
|
||||
raise ValueError("Array lengths must be equal")
|
||||
if self._target_dim > 1:
|
||||
shp = args[0].shape + (self._target_dim,)
|
||||
else:
|
||||
shp = args[0].shape
|
||||
xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
|
||||
r = self._call_norm(xa, self.xi)
|
||||
return np.dot(self._function(r), self.nodes).reshape(shp)
|
||||
@ -0,0 +1,550 @@
|
||||
"""Module for RBF interpolation."""
|
||||
import warnings
|
||||
from itertools import combinations_with_replacement
|
||||
|
||||
import numpy as np
|
||||
from numpy.linalg import LinAlgError
|
||||
from scipy.spatial import KDTree
|
||||
from scipy.special import comb
|
||||
from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
|
||||
|
||||
from ._rbfinterp_pythran import (_build_system,
|
||||
_build_evaluation_coefficients,
|
||||
_polynomial_matrix)
|
||||
|
||||
|
||||
__all__ = ["RBFInterpolator"]
|
||||
|
||||
|
||||
# These RBFs are implemented.
|
||||
_AVAILABLE = {
|
||||
"linear",
|
||||
"thin_plate_spline",
|
||||
"cubic",
|
||||
"quintic",
|
||||
"multiquadric",
|
||||
"inverse_multiquadric",
|
||||
"inverse_quadratic",
|
||||
"gaussian"
|
||||
}
|
||||
|
||||
|
||||
# The shape parameter does not need to be specified when using these RBFs.
|
||||
_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
|
||||
|
||||
|
||||
# For RBFs that are conditionally positive definite of order m, the interpolant
|
||||
# should include polynomial terms with degree >= m - 1. Define the minimum
|
||||
# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
|
||||
# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
|
||||
# are positive definite and do not need polynomial terms.
|
||||
_NAME_TO_MIN_DEGREE = {
|
||||
"multiquadric": 0,
|
||||
"linear": 0,
|
||||
"thin_plate_spline": 1,
|
||||
"cubic": 1,
|
||||
"quintic": 2
|
||||
}
|
||||
|
||||
|
||||
def _monomial_powers(ndim, degree):
|
||||
"""Return the powers for each monomial in a polynomial.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ndim : int
|
||||
Number of variables in the polynomial.
|
||||
degree : int
|
||||
Degree of the polynomial.
|
||||
|
||||
Returns
|
||||
-------
|
||||
(nmonos, ndim) int ndarray
|
||||
Array where each row contains the powers for each variable in a
|
||||
monomial.
|
||||
|
||||
"""
|
||||
nmonos = comb(degree + ndim, ndim, exact=True)
|
||||
out = np.zeros((nmonos, ndim), dtype=np.dtype("long"))
|
||||
count = 0
|
||||
for deg in range(degree + 1):
|
||||
for mono in combinations_with_replacement(range(ndim), deg):
|
||||
# `mono` is a tuple of variables in the current monomial with
|
||||
# multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
|
||||
for var in mono:
|
||||
out[count, var] += 1
|
||||
|
||||
count += 1
|
||||
|
||||
return out
|
||||
|
||||
|
||||
def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
|
||||
"""Build and solve the RBF interpolation system of equations.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
y : (P, N) float ndarray
|
||||
Data point coordinates.
|
||||
d : (P, S) float ndarray
|
||||
Data values at `y`.
|
||||
smoothing : (P,) float ndarray
|
||||
Smoothing parameter for each data point.
|
||||
kernel : str
|
||||
Name of the RBF.
|
||||
epsilon : float
|
||||
Shape parameter.
|
||||
powers : (R, N) int ndarray
|
||||
The exponents for each monomial in the polynomial.
|
||||
|
||||
Returns
|
||||
-------
|
||||
coeffs : (P + R, S) float ndarray
|
||||
Coefficients for each RBF and monomial.
|
||||
shift : (N,) float ndarray
|
||||
Domain shift used to create the polynomial matrix.
|
||||
scale : (N,) float ndarray
|
||||
Domain scaling used to create the polynomial matrix.
|
||||
|
||||
"""
|
||||
lhs, rhs, shift, scale = _build_system(
|
||||
y, d, smoothing, kernel, epsilon, powers
|
||||
)
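# LAPACK's dgesv returns (lu, piv, x, info); only the solution `x`
# (the stacked RBF and monomial coefficients) and the status flag
# `info` are used below.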
|
||||
_, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
|
||||
if info < 0:
|
||||
raise ValueError(f"The {-info}-th argument had an illegal value.")
|
||||
elif info > 0:
|
||||
msg = "Singular matrix."
|
||||
nmonos = powers.shape[0]
|
||||
if nmonos > 0:
|
||||
pmat = _polynomial_matrix((y - shift)/scale, powers)
|
||||
rank = np.linalg.matrix_rank(pmat)
|
||||
if rank < nmonos:
|
||||
msg = (
|
||||
"Singular matrix. The matrix of monomials evaluated at "
|
||||
"the data point coordinates does not have full column "
|
||||
f"rank ({rank}/{nmonos})."
|
||||
)
|
||||
|
||||
raise LinAlgError(msg)
|
||||
|
||||
return shift, scale, coeffs
|
||||
|
||||
|
||||
class RBFInterpolator:
|
||||
"""Radial basis function (RBF) interpolation in N dimensions.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
y : (npoints, ndims) array_like
|
||||
2-D array of data point coordinates.
|
||||
d : (npoints, ...) array_like
|
||||
N-D array of data values at `y`. The length of `d` along the first
|
||||
axis must be equal to the length of `y`. Unlike some interpolators, the
|
||||
interpolation axis cannot be changed.
|
||||
neighbors : int, optional
|
||||
If specified, the value of the interpolant at each evaluation point
|
||||
will be computed using only this many nearest data points. All the data
|
||||
points are used by default.
|
||||
smoothing : float or (npoints, ) array_like, optional
|
||||
Smoothing parameter. The interpolant perfectly fits the data when this
|
||||
is set to 0. For large values, the interpolant approaches a least
|
||||
squares fit of a polynomial with the specified degree. Default is 0.
|
||||
kernel : str, optional
|
||||
Type of RBF. This should be one of
|
||||
|
||||
- 'linear' : ``-r``
|
||||
- 'thin_plate_spline' : ``r**2 * log(r)``
|
||||
- 'cubic' : ``r**3``
|
||||
- 'quintic' : ``-r**5``
|
||||
- 'multiquadric' : ``-sqrt(1 + r**2)``
|
||||
- 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
|
||||
- 'inverse_quadratic' : ``1/(1 + r**2)``
|
||||
- 'gaussian' : ``exp(-r**2)``
|
||||
|
||||
Default is 'thin_plate_spline'.
|
||||
epsilon : float, optional
|
||||
Shape parameter that scales the input to the RBF. If `kernel` is
|
||||
'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
|
||||
1 and can be ignored because it has the same effect as scaling the
|
||||
smoothing parameter. Otherwise, this must be specified.
|
||||
degree : int, optional
|
||||
Degree of the added polynomial. For some RBFs the interpolant may not
|
||||
be well-posed if the polynomial degree is too small. Those RBFs and
|
||||
their corresponding minimum degrees are
|
||||
|
||||
- 'multiquadric' : 0
|
||||
- 'linear' : 0
|
||||
- 'thin_plate_spline' : 1
|
||||
- 'cubic' : 1
|
||||
- 'quintic' : 2
|
||||
|
||||
The default value is the minimum degree for `kernel` or 0 if there is
|
||||
no minimum degree. Set this to -1 for no added polynomial.
|
||||
|
||||
Notes
|
||||
-----
|
||||
An RBF is a scalar valued function in N-dimensional space whose value at
|
||||
:math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
|
||||
is the center of the RBF.
|
||||
|
||||
An RBF interpolant for the vector of data values :math:`d`, which are from
|
||||
locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
|
||||
plus a polynomial with a specified degree. The RBF interpolant is written
|
||||
as
|
||||
|
||||
.. math::
|
||||
f(x) = K(x, y) a + P(x) b,
|
||||
|
||||
where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
|
||||
evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
|
||||
monomials, which span polynomials with the specified degree, evaluated at
|
||||
:math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
|
||||
linear equations
|
||||
|
||||
.. math::
|
||||
(K(y, y) + \\lambda I) a + P(y) b = d
|
||||
|
||||
and
|
||||
|
||||
.. math::
|
||||
P(y)^T a = 0,
|
||||
|
||||
where :math:`\\lambda` is a non-negative smoothing parameter that controls
|
||||
how well we want to fit the data. The data are fit exactly when the
|
||||
smoothing parameter is 0.
|
||||
|
||||
The above system is uniquely solvable if the following requirements are
|
||||
met:
|
||||
|
||||
- :math:`P(y)` must have full column rank. :math:`P(y)` always has full
|
||||
column rank when `degree` is -1 or 0. When `degree` is 1,
|
||||
:math:`P(y)` has full column rank if the data point locations are not
|
||||
all collinear (N=2), coplanar (N=3), etc.
|
||||
- If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
|
||||
'cubic', or 'quintic', then `degree` must not be lower than the
|
||||
minimum value listed above.
|
||||
- If `smoothing` is 0, then each data point location must be distinct.
|
||||
|
||||
When using an RBF that is not scale invariant ('multiquadric',
|
||||
'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
|
||||
shape parameter must be chosen (e.g., through cross validation). Smaller
|
||||
values for the shape parameter correspond to wider RBFs. The problem can
|
||||
become ill-conditioned or singular when the shape parameter is too small.
|
||||
|
||||
The memory required to solve for the RBF interpolation coefficients
|
||||
increases quadratically with the number of data points, which can become
|
||||
impractical when interpolating more than about a thousand data points.
|
||||
To overcome memory limitations for large interpolation problems, the
|
||||
`neighbors` argument can be specified to compute an RBF interpolant for
|
||||
each evaluation point using only the nearest data points.
|
||||
|
||||
.. versionadded:: 1.7.0
|
||||
|
||||
See Also
|
||||
--------
|
||||
NearestNDInterpolator
|
||||
LinearNDInterpolator
|
||||
CloughTocher2DInterpolator
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
|
||||
World Scientific Publishing Co.
|
||||
|
||||
.. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
|
||||
|
||||
.. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
|
||||
|
||||
.. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
|
||||
|
||||
Examples
|
||||
--------
|
||||
Demonstrate interpolating scattered data to a grid in 2-D.
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.interpolate import RBFInterpolator
|
||||
>>> from scipy.stats.qmc import Halton
|
||||
|
||||
>>> rng = np.random.default_rng()
|
||||
>>> xobs = 2*Halton(2, seed=rng).random(100) - 1
|
||||
>>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
|
||||
|
||||
>>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
|
||||
>>> xflat = xgrid.reshape(2, -1).T
|
||||
>>> yflat = RBFInterpolator(xobs, yobs)(xflat)
|
||||
>>> ygrid = yflat.reshape(50, 50)
|
||||
|
||||
>>> fig, ax = plt.subplots()
|
||||
>>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
|
||||
>>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
|
||||
>>> fig.colorbar(p)
|
||||
>>> plt.show()
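
For larger or noisy data sets, the `neighbors` and `smoothing` parameters
described above can be combined so that each evaluation uses only nearby
observations; a small sketch:

>>> yobs_noisy = yobs + rng.normal(0, 0.01, yobs.shape)
>>> interp = RBFInterpolator(xobs, yobs_noisy, neighbors=20, smoothing=1e-3)
>>> ysmooth = interp(xflat).reshape(50, 50)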
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, y, d,
|
||||
neighbors=None,
|
||||
smoothing=0.0,
|
||||
kernel="thin_plate_spline",
|
||||
epsilon=None,
|
||||
degree=None):
|
||||
y = np.asarray(y, dtype=float, order="C")
|
||||
if y.ndim != 2:
|
||||
raise ValueError("`y` must be a 2-dimensional array.")
|
||||
|
||||
ny, ndim = y.shape
|
||||
|
||||
d_dtype = complex if np.iscomplexobj(d) else float
|
||||
d = np.asarray(d, dtype=d_dtype, order="C")
|
||||
if d.shape[0] != ny:
|
||||
raise ValueError(
|
||||
f"Expected the first axis of `d` to have length {ny}."
|
||||
)
|
||||
|
||||
d_shape = d.shape[1:]
|
||||
d = d.reshape((ny, -1))
|
||||
# If `d` is complex, convert it to a float array with twice as many
|
||||
# columns. Otherwise, the LHS matrix would need to be converted to
|
||||
# complex and take up 2x more memory than necessary.
|
||||
d = d.view(float)
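# e.g. a (ny, k) complex128 array becomes a (ny, 2*k) float64 array with
# the real and imaginary parts interleaved.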
|
||||
|
||||
if np.isscalar(smoothing):
|
||||
smoothing = np.full(ny, smoothing, dtype=float)
|
||||
else:
|
||||
smoothing = np.asarray(smoothing, dtype=float, order="C")
|
||||
if smoothing.shape != (ny,):
|
||||
raise ValueError(
|
||||
"Expected `smoothing` to be a scalar or have shape "
|
||||
f"({ny},)."
|
||||
)
|
||||
|
||||
kernel = kernel.lower()
|
||||
if kernel not in _AVAILABLE:
|
||||
raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
|
||||
|
||||
if epsilon is None:
|
||||
if kernel in _SCALE_INVARIANT:
|
||||
epsilon = 1.0
|
||||
else:
|
||||
raise ValueError(
|
||||
"`epsilon` must be specified if `kernel` is not one of "
|
||||
f"{_SCALE_INVARIANT}."
|
||||
)
|
||||
else:
|
||||
epsilon = float(epsilon)
|
||||
|
||||
min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
|
||||
if degree is None:
|
||||
degree = max(min_degree, 0)
|
||||
else:
|
||||
degree = int(degree)
|
||||
if degree < -1:
|
||||
raise ValueError("`degree` must be at least -1.")
|
||||
elif -1 < degree < min_degree:
|
||||
warnings.warn(
|
||||
f"`degree` should not be below {min_degree} except -1 "
|
||||
f"when `kernel` is '{kernel}'."
|
||||
f"The interpolant may not be uniquely "
|
||||
f"solvable, and the smoothing parameter may have an "
|
||||
f"unintuitive effect.",
|
||||
UserWarning, stacklevel=2
|
||||
)
|
||||
|
||||
if neighbors is None:
|
||||
nobs = ny
|
||||
else:
|
||||
# Make sure the number of nearest neighbors used for interpolation
|
||||
# does not exceed the number of observations.
|
||||
neighbors = int(min(neighbors, ny))
|
||||
nobs = neighbors
|
||||
|
||||
powers = _monomial_powers(ndim, degree)
|
||||
# The polynomial matrix must have full column rank in order for the
|
||||
# interpolant to be well-posed, which is not possible if there are
|
||||
# fewer observations than monomials.
|
||||
if powers.shape[0] > nobs:
|
||||
raise ValueError(
|
||||
f"At least {powers.shape[0]} data points are required when "
|
||||
f"`degree` is {degree} and the number of dimensions is {ndim}."
|
||||
)
|
||||
|
||||
if neighbors is None:
|
||||
shift, scale, coeffs = _build_and_solve_system(
|
||||
y, d, smoothing, kernel, epsilon, powers
|
||||
)
|
||||
|
||||
# Make these attributes private since they do not always exist.
|
||||
self._shift = shift
|
||||
self._scale = scale
|
||||
self._coeffs = coeffs
|
||||
|
||||
else:
|
||||
self._tree = KDTree(y)
|
||||
|
||||
self.y = y
|
||||
self.d = d
|
||||
self.d_shape = d_shape
|
||||
self.d_dtype = d_dtype
|
||||
self.neighbors = neighbors
|
||||
self.smoothing = smoothing
|
||||
self.kernel = kernel
|
||||
self.epsilon = epsilon
|
||||
self.powers = powers
|
||||
|
||||
def _chunk_evaluator(
|
||||
self,
|
||||
x,
|
||||
y,
|
||||
shift,
|
||||
scale,
|
||||
coeffs,
|
||||
memory_budget=1000000
|
||||
):
|
||||
"""
|
||||
Evaluate the interpolation while controlling memory consumption.
|
||||
We chunk the input if we need more memory than specified.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : (Q, N) float ndarray
Array of points at which to evaluate.
y : (P, N) float ndarray
Array of points at which the function values are known.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
coeffs : (P + R, S) float ndarray
Coefficients in front of the basis functions.
memory_budget : int
Total amount of memory (in units of sizeof(float)) we wish to devote
to storing the array of coefficients for interpolated points. If more
memory than that is needed, the input is chunked.
|
||||
|
||||
Returns
|
||||
-------
|
||||
(Q, S) float ndarray
|
||||
Interpolated array
|
||||
"""
|
||||
nx, ndim = x.shape
|
||||
if self.neighbors is None:
|
||||
nnei = len(y)
|
||||
else:
|
||||
nnei = self.neighbors
|
||||
# in each chunk we consume the same space we already occupy
|
||||
chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
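# e.g. with a 10-term polynomial and 90 neighbors, the default budget of
# 1e6 floats gives chunks of 10001 evaluation points.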
|
||||
if chunksize <= nx:
|
||||
out = np.empty((nx, self.d.shape[1]), dtype=float)
|
||||
for i in range(0, nx, chunksize):
|
||||
vec = _build_evaluation_coefficients(
|
||||
x[i:i + chunksize, :],
|
||||
y,
|
||||
self.kernel,
|
||||
self.epsilon,
|
||||
self.powers,
|
||||
shift,
|
||||
scale)
|
||||
out[i:i + chunksize, :] = np.dot(vec, coeffs)
|
||||
else:
|
||||
vec = _build_evaluation_coefficients(
|
||||
x,
|
||||
y,
|
||||
self.kernel,
|
||||
self.epsilon,
|
||||
self.powers,
|
||||
shift,
|
||||
scale)
|
||||
out = np.dot(vec, coeffs)
|
||||
return out
|
||||
|
||||
def __call__(self, x):
|
||||
"""Evaluate the interpolant at `x`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : (Q, N) array_like
|
||||
Evaluation point coordinates.
|
||||
|
||||
Returns
|
||||
-------
|
||||
(Q, ...) ndarray
|
||||
Values of the interpolant at `x`.
|
||||
|
||||
"""
|
||||
x = np.asarray(x, dtype=float, order="C")
|
||||
if x.ndim != 2:
|
||||
raise ValueError("`x` must be a 2-dimensional array.")
|
||||
|
||||
nx, ndim = x.shape
|
||||
if ndim != self.y.shape[1]:
|
||||
raise ValueError("Expected the second axis of `x` to have length "
|
||||
f"{self.y.shape[1]}.")
|
||||
|
||||
# Our memory budget for storing RBF coefficients is based on how many
# floats we already hold in memory; if that number is below 1e6, we use
# 1e6. This budget decides how the inputs are chunked.
|
||||
memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
|
||||
|
||||
if self.neighbors is None:
|
||||
out = self._chunk_evaluator(
|
||||
x,
|
||||
self.y,
|
||||
self._shift,
|
||||
self._scale,
|
||||
self._coeffs,
|
||||
memory_budget=memory_budget)
|
||||
else:
|
||||
# Get the indices of the k nearest observation points to each
|
||||
# evaluation point.
|
||||
_, yindices = self._tree.query(x, self.neighbors)
|
||||
if self.neighbors == 1:
|
||||
# `KDTree` squeezes the output when neighbors=1.
|
||||
yindices = yindices[:, None]
|
||||
|
||||
# Multiple evaluation points may have the same neighborhood of
|
||||
# observation points. Make the neighborhoods unique so that we only
|
||||
# compute the interpolation coefficients once for each
|
||||
# neighborhood.
|
||||
yindices = np.sort(yindices, axis=1)
|
||||
yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
|
||||
inv = np.reshape(inv, (-1,)) # flatten, we need 1-D indices
|
||||
# `inv` tells us which neighborhood will be used by each evaluation
|
||||
# point. Now we find which evaluation points will be using each
|
||||
# neighborhood.
|
||||
xindices = [[] for _ in range(len(yindices))]
|
||||
for i, j in enumerate(inv):
|
||||
xindices[j].append(i)
|
||||
|
||||
out = np.empty((nx, self.d.shape[1]), dtype=float)
|
||||
for xidx, yidx in zip(xindices, yindices):
|
||||
# `yidx` are the indices of the observations in this
|
||||
# neighborhood. `xidx` are the indices of the evaluation points
|
||||
# that are using this neighborhood.
|
||||
xnbr = x[xidx]
|
||||
ynbr = self.y[yidx]
|
||||
dnbr = self.d[yidx]
|
||||
snbr = self.smoothing[yidx]
|
||||
shift, scale, coeffs = _build_and_solve_system(
|
||||
ynbr,
|
||||
dnbr,
|
||||
snbr,
|
||||
self.kernel,
|
||||
self.epsilon,
|
||||
self.powers,
|
||||
)
|
||||
out[xidx] = self._chunk_evaluator(
|
||||
xnbr,
|
||||
ynbr,
|
||||
shift,
|
||||
scale,
|
||||
coeffs,
|
||||
memory_budget=memory_budget)
|
||||
|
||||
out = out.view(self.d_dtype)
|
||||
out = out.reshape((nx, ) + self.d_shape)
|
||||
return out
|
||||
Binary file not shown.
766
venv/lib/python3.12/site-packages/scipy/interpolate/_rgi.py
Normal file
766
venv/lib/python3.12/site-packages/scipy/interpolate/_rgi.py
Normal file
@ -0,0 +1,766 @@
|
||||
__all__ = ['RegularGridInterpolator', 'interpn']
|
||||
|
||||
import itertools
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
|
||||
import scipy.sparse.linalg as ssl
|
||||
|
||||
from .interpnd import _ndim_coords_from_arrays
|
||||
from ._cubic import PchipInterpolator
|
||||
from ._rgi_cython import evaluate_linear_2d, find_indices
|
||||
from ._bsplines import make_interp_spline
|
||||
from ._fitpack2 import RectBivariateSpline
|
||||
from ._ndbspline import make_ndbspl
|
||||
|
||||
|
||||
def _check_points(points):
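# Coerce each axis to a float ndarray, flip any strictly descending axis
# to ascending and record which axes were flipped; for example
# _check_points(([3.0, 2.0, 1.0],)) returns ((array([1., 2., 3.]),), (0,)).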
|
||||
descending_dimensions = []
|
||||
grid = []
|
||||
for i, p in enumerate(points):
|
||||
# early make points float
|
||||
# see https://github.com/scipy/scipy/pull/17230
|
||||
p = np.asarray(p, dtype=float)
|
||||
if not np.all(p[1:] > p[:-1]):
|
||||
if np.all(p[1:] < p[:-1]):
|
||||
# input is descending, so make it ascending
|
||||
descending_dimensions.append(i)
|
||||
p = np.flip(p)
|
||||
else:
|
||||
raise ValueError(
|
||||
"The points in dimension %d must be strictly "
|
||||
"ascending or descending" % i)
|
||||
# see https://github.com/scipy/scipy/issues/17716
|
||||
p = np.ascontiguousarray(p)
|
||||
grid.append(p)
|
||||
return tuple(grid), tuple(descending_dimensions)
|
||||
|
||||
|
||||
def _check_dimensionality(points, values):
|
||||
if len(points) > values.ndim:
|
||||
raise ValueError("There are %d point arrays, but values has %d "
|
||||
"dimensions" % (len(points), values.ndim))
|
||||
for i, p in enumerate(points):
|
||||
if not np.asarray(p).ndim == 1:
|
||||
raise ValueError("The points in dimension %d must be "
|
||||
"1-dimensional" % i)
|
||||
if not values.shape[i] == len(p):
|
||||
raise ValueError("There are %d points and %d values in "
|
||||
"dimension %d" % (len(p), values.shape[i], i))
|
||||
|
||||
|
||||
class RegularGridInterpolator:
|
||||
"""
|
||||
Interpolator on a regular or rectilinear grid in arbitrary dimensions.
|
||||
|
||||
The data must be defined on a rectilinear grid; that is, a rectangular
|
||||
grid with even or uneven spacing. Linear, nearest-neighbor, and spline
|
||||
interpolations are supported. After setting up the interpolator object,
|
||||
the interpolation method may be chosen at each evaluation.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
|
||||
The points defining the regular grid in n dimensions. The points in
|
||||
each dimension (i.e., every element of the points tuple) must be
|
||||
strictly ascending or descending.
|
||||
|
||||
values : array_like, shape (m1, ..., mn, ...)
|
||||
The data on the regular grid in n dimensions. Complex data is
|
||||
accepted.
|
||||
|
||||
.. deprecated:: 1.13.0
|
||||
Complex data is deprecated with ``method="pchip"`` and will raise an
|
||||
error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
|
||||
works with real values. If you are trying to use the real components of
|
||||
the passed array, use ``np.real`` on ``values``.
|
||||
|
||||
method : str, optional
|
||||
The method of interpolation to perform. Supported are "linear",
|
||||
"nearest", "slinear", "cubic", "quintic" and "pchip". This
|
||||
parameter will become the default for the object's ``__call__``
|
||||
method. Default is "linear".
|
||||
|
||||
bounds_error : bool, optional
|
||||
If True, when interpolated values are requested outside of the
|
||||
domain of the input data, a ValueError is raised.
|
||||
If False, then `fill_value` is used.
|
||||
Default is True.
|
||||
|
||||
fill_value : float or None, optional
|
||||
The value to use for points outside of the interpolation domain.
|
||||
If None, values outside the domain are extrapolated.
|
||||
Default is ``np.nan``.
|
||||
|
||||
solver : callable, optional
|
||||
Only used for methods "slinear", "cubic" and "quintic".
|
||||
Sparse linear algebra solver for construction of the NdBSpline instance.
|
||||
Default is the iterative solver `scipy.sparse.linalg.gcrotmk`.
|
||||
|
||||
.. versionadded:: 1.13
|
||||
|
||||
solver_args: dict, optional
|
||||
Additional arguments to pass to `solver`, if any.
|
||||
|
||||
.. versionadded:: 1.13
|
||||
|
||||
Methods
|
||||
-------
|
||||
__call__
|
||||
|
||||
Attributes
|
||||
----------
|
||||
grid : tuple of ndarrays
|
||||
The points defining the regular grid in n dimensions.
|
||||
This tuple defines the full grid via
|
||||
``np.meshgrid(*grid, indexing='ij')``
|
||||
values : ndarray
|
||||
Data values at the grid.
|
||||
method : str
|
||||
Interpolation method.
|
||||
fill_value : float or ``None``
|
||||
Use this value for out-of-bounds arguments to `__call__`.
|
||||
bounds_error : bool
|
||||
If ``True``, out-of-bounds argument raise a ``ValueError``.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
|
||||
avoids expensive triangulation of the input data by taking advantage of the
|
||||
regular grid structure.
|
||||
|
||||
In other words, this class assumes that the data is defined on a
|
||||
*rectilinear* grid.
|
||||
|
||||
.. versionadded:: 0.14
|
||||
|
||||
The 'slinear' (k=1), 'cubic' (k=3), and 'quintic' (k=5) methods are
tensor-product spline interpolators, where `k` is the spline degree.
If any dimension has fewer points than `k` + 1, an error will be raised.
|
||||
|
||||
.. versionadded:: 1.9
|
||||
|
||||
If the input data is such that dimensions have incommensurate
|
||||
units and differ by many orders of magnitude, the interpolant may have
|
||||
numerical artifacts. Consider rescaling the data before interpolating.
|
||||
|
||||
**Choosing a solver for spline methods**
|
||||
|
||||
Spline methods, "slinear", "cubic" and "quintic" involve solving a
|
||||
large sparse linear system at instantiation time. Depending on data,
|
||||
the default solver may or may not be adequate. When it is not, you may
|
||||
need to experiment with an optional `solver` argument, where you may
|
||||
choose between the direct solver (`scipy.sparse.linalg.spsolve`) or
|
||||
iterative solvers from `scipy.sparse.linalg`. You may need to supply
|
||||
additional parameters via the optional `solver_args` parameter (for instance,
|
||||
you may supply the starting value or target tolerance). See the
|
||||
`scipy.sparse.linalg` documentation for the full list of available options.
|
||||
|
||||
Alternatively, you may instead use the legacy methods, "slinear_legacy",
|
||||
"cubic_legacy" and "quintic_legacy". These methods allow faster construction
|
||||
but evaluations will be much slower.
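
For example, a direct solve can be requested through the `solver`
parameter documented above (a sketch; any other solver from
`scipy.sparse.linalg`, together with its keyword arguments via
`solver_args`, can be passed the same way)::

    from scipy.sparse.linalg import spsolve
    interp = RegularGridInterpolator((x, y, z), data, method="cubic",
                                     solver=spsolve)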
|
||||
|
||||
Examples
|
||||
--------
|
||||
**Evaluate a function on the points of a 3-D grid**
|
||||
|
||||
As a first example, we evaluate a simple example function on the points of
|
||||
a 3-D grid:
|
||||
|
||||
>>> from scipy.interpolate import RegularGridInterpolator
|
||||
>>> import numpy as np
|
||||
>>> def f(x, y, z):
|
||||
... return 2 * x**3 + 3 * y**2 - z
|
||||
>>> x = np.linspace(1, 4, 11)
|
||||
>>> y = np.linspace(4, 7, 22)
|
||||
>>> z = np.linspace(7, 9, 33)
|
||||
>>> xg, yg, zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
|
||||
>>> data = f(xg, yg, zg)
|
||||
|
||||
``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
|
||||
Next, define an interpolating function from this data:
|
||||
|
||||
>>> interp = RegularGridInterpolator((x, y, z), data)
|
||||
|
||||
Evaluate the interpolating function at the two points
|
||||
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
|
||||
|
||||
>>> pts = np.array([[2.1, 6.2, 8.3],
|
||||
... [3.3, 5.2, 7.1]])
|
||||
>>> interp(pts)
|
||||
array([ 125.80469388, 146.30069388])
|
||||
|
||||
which is indeed a close approximation to
|
||||
|
||||
>>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
|
||||
(125.54200000000002, 145.894)
|
||||
|
||||
**Interpolate and extrapolate a 2D dataset**
|
||||
|
||||
As a second example, we interpolate and extrapolate a 2D data set:
|
||||
|
||||
>>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
|
||||
>>> def ff(x, y):
|
||||
... return x**2 + y**2
|
||||
|
||||
>>> xg, yg = np.meshgrid(x, y, indexing='ij')
|
||||
>>> data = ff(xg, yg)
|
||||
>>> interp = RegularGridInterpolator((x, y), data,
|
||||
... bounds_error=False, fill_value=None)
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> fig = plt.figure()
|
||||
>>> ax = fig.add_subplot(projection='3d')
|
||||
>>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
|
||||
... s=60, c='k', label='data')
|
||||
|
||||
Evaluate and plot the interpolator on a finer grid
|
||||
|
||||
>>> xx = np.linspace(-4, 9, 31)
|
||||
>>> yy = np.linspace(-4, 9, 31)
|
||||
>>> X, Y = np.meshgrid(xx, yy, indexing='ij')
|
||||
|
||||
>>> # interpolator
|
||||
>>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
|
||||
... alpha=0.4, color='m', label='linear interp')
|
||||
|
||||
>>> # ground truth
|
||||
>>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
|
||||
... alpha=0.4, label='ground truth')
|
||||
>>> plt.legend()
|
||||
>>> plt.show()
|
||||
|
||||
Other examples are given
|
||||
:ref:`in the tutorial <tutorial-interpolate_regular_grid_interpolator>`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
NearestNDInterpolator : Nearest neighbor interpolator on *unstructured*
|
||||
data in N dimensions
|
||||
|
||||
LinearNDInterpolator : Piecewise linear interpolator on *unstructured* data
|
||||
in N dimensions
|
||||
|
||||
interpn : a convenience function which wraps `RegularGridInterpolator`
|
||||
|
||||
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
|
||||
(suitable for e.g., N-D image resampling)
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Python package *regulargrid* by Johannes Buchner, see
|
||||
https://pypi.python.org/pypi/regulargrid/
|
||||
.. [2] Wikipedia, "Trilinear interpolation",
|
||||
https://en.wikipedia.org/wiki/Trilinear_interpolation
|
||||
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
|
||||
and multilinear table interpolation in many dimensions." MATH.
|
||||
COMPUT. 50.181 (1988): 189-196.
|
||||
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
|
||||
:doi:`10.1090/S0025-5718-1988-0917826-0`
|
||||
|
||||
"""
|
||||
# this class is based on code originally programmed by Johannes Buchner,
|
||||
# see https://github.com/JohannesBuchner/regulargrid
|
||||
|
||||
_SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3,
|
||||
"slinear_legacy": 1, "cubic_legacy": 3, "quintic_legacy": 5,}
|
||||
_SPLINE_METHODS_recursive = {"slinear_legacy", "cubic_legacy",
|
||||
"quintic_legacy", "pchip"}
|
||||
_SPLINE_METHODS_ndbspl = {"slinear", "cubic", "quintic"}
|
||||
_SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
|
||||
_ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
|
||||
|
||||
def __init__(self, points, values, method="linear", bounds_error=True,
|
||||
fill_value=np.nan, *, solver=None, solver_args=None):
|
||||
if method not in self._ALL_METHODS:
|
||||
raise ValueError("Method '%s' is not defined" % method)
|
||||
elif method in self._SPLINE_METHODS:
|
||||
self._validate_grid_dimensions(points, method)
|
||||
self.method = method
|
||||
self.bounds_error = bounds_error
|
||||
self.grid, self._descending_dimensions = _check_points(points)
|
||||
self.values = self._check_values(values)
|
||||
self._check_dimensionality(self.grid, self.values)
|
||||
self.fill_value = self._check_fill_value(self.values, fill_value)
|
||||
if self._descending_dimensions:
|
||||
self.values = np.flip(values, axis=self._descending_dimensions)
|
||||
if self.method == "pchip" and np.iscomplexobj(self.values):
|
||||
msg = ("`PchipInterpolator` only works with real values. Passing "
|
||||
"complex-dtyped `values` with `method='pchip'` is deprecated "
|
||||
"and will raise an error in SciPy 1.15.0. If you are trying to "
|
||||
"use the real components of the passed array, use `np.real` on "
|
||||
"the array before passing to `RegularGridInterpolator`.")
|
||||
warnings.warn(msg, DeprecationWarning, stacklevel=2)
|
||||
if method in self._SPLINE_METHODS_ndbspl:
|
||||
if solver_args is None:
|
||||
solver_args = {}
|
||||
self._spline = self._construct_spline(method, solver, **solver_args)
|
||||
else:
|
||||
if solver is not None or solver_args:
|
||||
raise ValueError(
|
||||
f"{method =} does not accept the 'solver' argument. Got "
|
||||
f" {solver = } and with arguments {solver_args}."
|
||||
)
|
||||
|
||||
def _construct_spline(self, method, solver=None, **solver_args):
|
||||
if solver is None:
|
||||
solver = ssl.gcrotmk
|
||||
spl = make_ndbspl(
|
||||
self.grid, self.values, self._SPLINE_DEGREE_MAP[method],
|
||||
solver=solver, **solver_args
|
||||
)
|
||||
return spl
|
||||
|
||||
def _check_dimensionality(self, grid, values):
|
||||
_check_dimensionality(grid, values)
|
||||
|
||||
def _check_points(self, points):
|
||||
return _check_points(points)
|
||||
|
||||
def _check_values(self, values):
|
||||
if not hasattr(values, 'ndim'):
|
||||
# allow reasonable duck-typed values
|
||||
values = np.asarray(values)
|
||||
|
||||
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
|
||||
if not np.issubdtype(values.dtype, np.inexact):
|
||||
values = values.astype(float)
|
||||
|
||||
return values
|
||||
|
||||
def _check_fill_value(self, values, fill_value):
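# Reject fill values whose dtype cannot be safely cast to the dtype of
# `values` (e.g. a complex fill_value for float data); `None` always
# passes through.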
|
||||
if fill_value is not None:
|
||||
fill_value_dtype = np.asarray(fill_value).dtype
|
||||
if (hasattr(values, 'dtype') and not
|
||||
np.can_cast(fill_value_dtype, values.dtype,
|
||||
casting='same_kind')):
|
||||
raise ValueError("fill_value must be either 'None' or "
|
||||
"of a type compatible with values")
|
||||
return fill_value
|
||||
|
||||
def __call__(self, xi, method=None, *, nu=None):
|
||||
"""
|
||||
Interpolation at coordinates.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
xi : ndarray of shape (..., ndim)
|
||||
The coordinates to evaluate the interpolator at.
|
||||
|
||||
method : str, optional
|
||||
The method of interpolation to perform. Supported are "linear",
|
||||
"nearest", "slinear", "cubic", "quintic" and "pchip". Default is
|
||||
the method chosen when the interpolator was created.
|
||||
|
||||
nu : sequence of ints, length ndim, optional
|
||||
If not None, the orders of the derivatives to evaluate.
|
||||
Each entry must be non-negative.
|
||||
Only allowed for methods "slinear", "cubic" and "quintic".
|
||||
|
||||
.. versionadded:: 1.13
|
||||
|
||||
Returns
|
||||
-------
|
||||
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
|
||||
Interpolated values at `xi`. See notes for behaviour when
|
||||
``xi.ndim == 1``.
|
||||
|
||||
Notes
|
||||
-----
|
||||
In the case that ``xi.ndim == 1`` a new axis is inserted into
|
||||
the 0 position of the returned array, values_x, so its shape is
|
||||
instead ``(1,) + values.shape[ndim:]``.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Here we define a nearest-neighbor interpolator of a simple function
|
||||
|
||||
>>> import numpy as np
|
||||
>>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
|
||||
>>> def f(x, y):
|
||||
... return x**2 + y**2
|
||||
>>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
|
||||
>>> from scipy.interpolate import RegularGridInterpolator
|
||||
>>> interp = RegularGridInterpolator((x, y), data, method='nearest')
|
||||
|
||||
By construction, the interpolator uses the nearest-neighbor
|
||||
interpolation
|
||||
|
||||
>>> interp([[1.5, 1.3], [0.3, 4.5]])
|
||||
array([2., 9.])
|
||||
|
||||
We can however evaluate the linear interpolant by overriding the
|
||||
`method` parameter
|
||||
|
||||
>>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
|
||||
array([ 4.7, 24.3])
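
When a spline method is used, derivatives of the interpolant can be
requested through the `nu` parameter described above; a brief sketch
(assuming the requested derivative order does not exceed the spline
degree):

>>> interp_s = RegularGridInterpolator((x, y), data, method='slinear')
>>> dfdx = interp_s([[1.5, 1.3]], nu=(1, 0))  # first derivative along x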
|
||||
"""
|
||||
method = self.method if method is None else method
|
||||
is_method_changed = self.method != method
|
||||
if method not in self._ALL_METHODS:
|
||||
raise ValueError("Method '%s' is not defined" % method)
|
||||
if is_method_changed and method in self._SPLINE_METHODS_ndbspl:
|
||||
self._spline = self._construct_spline(method)
|
||||
|
||||
if nu is not None and method not in self._SPLINE_METHODS_ndbspl:
|
||||
raise ValueError(
|
||||
f"Can only compute derivatives for methods "
|
||||
f"{self._SPLINE_METHODS_ndbspl}, got {method =}."
|
||||
)
|
||||
|
||||
xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
|
||||
|
||||
if method == "linear":
|
||||
indices, norm_distances = self._find_indices(xi.T)
|
||||
if (ndim == 2 and hasattr(self.values, 'dtype') and
|
||||
self.values.ndim == 2 and self.values.flags.writeable and
|
||||
self.values.dtype in (np.float64, np.complex128) and
|
||||
self.values.dtype.byteorder == '='):
|
||||
# until cython supports const fused types, the fast path
|
||||
# cannot support non-writeable values
|
||||
# a fast path
|
||||
out = np.empty(indices.shape[1], dtype=self.values.dtype)
|
||||
result = evaluate_linear_2d(self.values,
|
||||
indices,
|
||||
norm_distances,
|
||||
self.grid,
|
||||
out)
|
||||
else:
|
||||
result = self._evaluate_linear(indices, norm_distances)
|
||||
elif method == "nearest":
|
||||
indices, norm_distances = self._find_indices(xi.T)
|
||||
result = self._evaluate_nearest(indices, norm_distances)
|
||||
elif method in self._SPLINE_METHODS:
|
||||
if is_method_changed:
|
||||
self._validate_grid_dimensions(self.grid, method)
|
||||
if method in self._SPLINE_METHODS_recursive:
|
||||
result = self._evaluate_spline(xi, method)
|
||||
else:
|
||||
result = self._spline(xi, nu=nu)
|
||||
|
||||
if not self.bounds_error and self.fill_value is not None:
|
||||
result[out_of_bounds] = self.fill_value
|
||||
|
||||
# f(nan) = nan, if any
|
||||
if np.any(nans):
|
||||
result[nans] = np.nan
|
||||
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
|
||||
|
||||
def _prepare_xi(self, xi):
|
||||
ndim = len(self.grid)
|
||||
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
|
||||
if xi.shape[-1] != len(self.grid):
|
||||
raise ValueError("The requested sample points xi have dimension "
|
||||
f"{xi.shape[-1]} but this "
|
||||
f"RegularGridInterpolator has dimension {ndim}")
|
||||
|
||||
xi_shape = xi.shape
|
||||
xi = xi.reshape(-1, xi_shape[-1])
|
||||
xi = np.asarray(xi, dtype=float)
|
||||
|
||||
# find nans in input
|
||||
nans = np.any(np.isnan(xi), axis=-1)
|
||||
|
||||
if self.bounds_error:
|
||||
for i, p in enumerate(xi.T):
|
||||
if not np.logical_and(np.all(self.grid[i][0] <= p),
|
||||
np.all(p <= self.grid[i][-1])):
|
||||
raise ValueError("One of the requested xi is out of bounds "
|
||||
"in dimension %d" % i)
|
||||
out_of_bounds = None
|
||||
else:
|
||||
out_of_bounds = self._find_out_of_bounds(xi.T)
|
||||
|
||||
return xi, xi_shape, ndim, nans, out_of_bounds
|
||||
|
||||
def _evaluate_linear(self, indices, norm_distances):
|
||||
# slice for broadcasting over trailing dimensions in self.values
|
||||
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
|
||||
|
||||
# Compute shifting up front before zipping everything together
|
||||
shift_norm_distances = [1 - yi for yi in norm_distances]
|
||||
shift_indices = [i + 1 for i in indices]
|
||||
|
||||
# The formula for linear interpolation in 2d takes the form:
|
||||
# values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
|
||||
# self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
|
||||
# self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
|
||||
# self.values[(i0 + 1, i1 + 1)] * y0 * y1
|
||||
# We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
|
||||
zipped1 = zip(indices, shift_norm_distances)
|
||||
zipped2 = zip(shift_indices, norm_distances)
|
||||
|
||||
# Take all products of zipped1 and zipped2 and iterate over them
|
||||
# to get the terms in the above formula. This corresponds to iterating
|
||||
# over the vertices of a hypercube.
|
||||
hypercube = itertools.product(*zip(zipped1, zipped2))
|
||||
value = np.array([0.])
|
||||
for h in hypercube:
|
||||
edge_indices, weights = zip(*h)
|
||||
weight = np.array([1.])
|
||||
for w in weights:
|
||||
weight = weight * w
|
||||
term = np.asarray(self.values[edge_indices]) * weight[vslice]
|
||||
value = value + term # cannot use += because broadcasting
|
||||
return value
|
||||
|
||||
def _evaluate_nearest(self, indices, norm_distances):
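# Points exactly halfway between two grid nodes (normalized distance 0.5)
# resolve to the lower index.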
|
||||
idx_res = [np.where(yi <= .5, i, i + 1)
|
||||
for i, yi in zip(indices, norm_distances)]
|
||||
return self.values[tuple(idx_res)]
|
||||
|
||||
def _validate_grid_dimensions(self, points, method):
|
||||
k = self._SPLINE_DEGREE_MAP[method]
|
||||
for i, point in enumerate(points):
|
||||
ndim = len(np.atleast_1d(point))
|
||||
if ndim <= k:
|
||||
raise ValueError(f"There are {ndim} points in dimension {i},"
|
||||
f" but method {method} requires at least "
|
||||
f" {k+1} points per dimension.")
|
||||
|
||||
def _evaluate_spline(self, xi, method):
|
||||
# ensure xi is 2D list of points to evaluate (`m` is the number of
|
||||
# points and `n` is the number of interpolation dimensions,
|
||||
# ``n == len(self.grid)``.)
|
||||
if xi.ndim == 1:
|
||||
xi = xi.reshape((1, xi.size))
|
||||
m, n = xi.shape
|
||||
|
||||
# Reorder the axes: n-dimensional process iterates over the
|
||||
# interpolation axes from the last axis downwards: E.g. for a 4D grid
|
||||
# the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
|
||||
# the 0th axis of its argument array (for 1D routine it's its ``y``
|
||||
# array). Thus permute the interpolation axes of `values` *and keep
|
||||
# trailing dimensions trailing*.
|
||||
axes = tuple(range(self.values.ndim))
|
||||
axx = axes[:n][::-1] + axes[n:]
|
||||
values = self.values.transpose(axx)
|
||||
|
||||
if method == 'pchip':
|
||||
_eval_func = self._do_pchip
|
||||
else:
|
||||
_eval_func = self._do_spline_fit
|
||||
k = self._SPLINE_DEGREE_MAP[method]
|
||||
|
||||
# Non-stationary procedure: difficult to vectorize this part entirely
|
||||
# into numpy-level operations. Unfortunately this requires explicit
|
||||
# looping over each point in xi.
|
||||
|
||||
# can at least vectorize the first pass across all points in the
|
||||
# last variable of xi.
|
||||
last_dim = n - 1
|
||||
first_values = _eval_func(self.grid[last_dim],
|
||||
values,
|
||||
xi[:, last_dim],
|
||||
k)
|
||||
|
||||
# the rest of the dimensions have to be on a per point-in-xi basis
|
||||
shape = (m, *self.values.shape[n:])
|
||||
result = np.empty(shape, dtype=self.values.dtype)
|
||||
for j in range(m):
|
||||
# Main process: Apply 1D interpolate in each dimension
|
||||
# sequentially, starting with the last dimension.
|
||||
# These are then "folded" into the next dimension in-place.
|
||||
folded_values = first_values[j, ...]
|
||||
for i in range(last_dim-1, -1, -1):
|
||||
# Interpolate for each 1D from the last dimensions.
|
||||
# This collapses each 1D sequence into a scalar.
|
||||
folded_values = _eval_func(self.grid[i],
|
||||
folded_values,
|
||||
xi[j, i],
|
||||
k)
|
||||
result[j, ...] = folded_values
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _do_spline_fit(x, y, pt, k):
|
||||
local_interp = make_interp_spline(x, y, k=k, axis=0)
|
||||
values = local_interp(pt)
|
||||
return values
|
||||
|
||||
@staticmethod
|
||||
def _do_pchip(x, y, pt, k):
|
||||
local_interp = PchipInterpolator(x, y, axis=0)
|
||||
values = local_interp(pt)
|
||||
return values
|
||||
|
||||
def _find_indices(self, xi):
|
||||
return find_indices(self.grid, xi)
|
||||
|
||||
def _find_out_of_bounds(self, xi):
|
||||
# check for out of bounds xi
|
||||
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
|
||||
# iterate through dimensions
|
||||
for x, grid in zip(xi, self.grid):
|
||||
out_of_bounds += x < grid[0]
|
||||
out_of_bounds += x > grid[-1]
|
||||
return out_of_bounds
|
||||
|
||||
|
||||
def interpn(points, values, xi, method="linear", bounds_error=True,
|
||||
fill_value=np.nan):
|
||||
"""
|
||||
Multidimensional interpolation on regular or rectilinear grids.
|
||||
|
||||
Strictly speaking, not all regular grids are supported - this function
|
||||
works on *rectilinear* grids, that is, a rectangular grid with even or
|
||||
uneven spacing.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
|
||||
The points defining the regular grid in n dimensions. The points in
|
||||
each dimension (i.e., every element of the points tuple) must be
|
||||
strictly ascending or descending.
|
||||
|
||||
values : array_like, shape (m1, ..., mn, ...)
|
||||
The data on the regular grid in n dimensions. Complex data is
|
||||
accepted.
|
||||
|
||||
.. deprecated:: 1.13.0
|
||||
Complex data is deprecated with ``method="pchip"`` and will raise an
|
||||
error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
|
||||
works with real values. If you are trying to use the real components of
|
||||
the passed array, use ``np.real`` on ``values``.
|
||||
|
||||
xi : ndarray of shape (..., ndim)
|
||||
The coordinates to sample the gridded data at.
|
||||
|
||||
method : str, optional
|
||||
The method of interpolation to perform. Supported are "linear",
|
||||
"nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
|
||||
"splinef2d" is only supported for 2-dimensional data.
|
||||
|
||||
bounds_error : bool, optional
|
||||
If True, when interpolated values are requested outside of the
|
||||
domain of the input data, a ValueError is raised.
|
||||
If False, then `fill_value` is used.
|
||||
|
||||
fill_value : number, optional
|
||||
If provided, the value to use for points outside of the
|
||||
interpolation domain. If None, values outside
|
||||
the domain are extrapolated. Extrapolation is not supported by method
|
||||
"splinef2d".
|
||||
|
||||
Returns
|
||||
-------
|
||||
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
|
||||
Interpolated values at `xi`. See notes for behaviour when
|
||||
``xi.ndim == 1``.
|
||||
|
||||
See Also
|
||||
--------
|
||||
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
|
||||
data in N dimensions
|
||||
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
|
||||
in N dimensions
|
||||
RegularGridInterpolator : interpolation on a regular or rectilinear grid
|
||||
in arbitrary dimensions (`interpn` wraps this
|
||||
class).
|
||||
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
|
||||
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
|
||||
(suitable for e.g., N-D image resampling)
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 0.14
|
||||
|
||||
In the case that ``xi.ndim == 1`` a new axis is inserted into
|
||||
the 0 position of the returned array, values_x, so its shape is
|
||||
instead ``(1,) + values.shape[ndim:]``.
|
||||
|
||||
If the input data is such that input dimensions have incommensurate
|
||||
units and differ by many orders of magnitude, the interpolant may have
|
||||
numerical artifacts. Consider rescaling the data before interpolation.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Evaluate a simple example function on the points of a regular 3-D grid:
|
||||
|
||||
>>> import numpy as np
|
||||
>>> from scipy.interpolate import interpn
|
||||
>>> def value_func_3d(x, y, z):
|
||||
... return 2 * x + 3 * y - z
|
||||
>>> x = np.linspace(0, 4, 5)
|
||||
>>> y = np.linspace(0, 5, 6)
|
||||
>>> z = np.linspace(0, 6, 7)
|
||||
>>> points = (x, y, z)
|
||||
>>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
|
||||
|
||||
Evaluate the interpolating function at a point
|
||||
|
||||
>>> point = np.array([2.21, 3.12, 1.15])
|
||||
>>> print(interpn(points, values, point))
|
||||
[12.63]
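
A different method from the list documented above can be requested in the
same call; for instance, nearest-neighbor interpolation snaps to the
closest grid node:

>>> print(interpn(points, values, point, method='nearest'))
[12.]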
|
||||
|
||||
"""
|
||||
# sanity check 'method' kwarg
|
||||
if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
|
||||
"splinef2d", "slinear",
|
||||
"slinear_legacy", "cubic_legacy", "quintic_legacy"]:
|
||||
raise ValueError("interpn only understands the methods 'linear', "
|
||||
"'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
|
||||
f"and 'splinef2d'. You provided {method}.")
|
||||
|
||||
if not hasattr(values, 'ndim'):
|
||||
values = np.asarray(values)
|
||||
|
||||
ndim = values.ndim
|
||||
if ndim > 2 and method == "splinef2d":
|
||||
raise ValueError("The method splinef2d can only be used for "
|
||||
"2-dimensional input data")
|
||||
if not bounds_error and fill_value is None and method == "splinef2d":
|
||||
raise ValueError("The method splinef2d does not support extrapolation.")
|
||||
|
||||
# sanity check consistency of input dimensions
|
||||
if len(points) > ndim:
|
||||
raise ValueError("There are %d point arrays, but values has %d "
|
||||
"dimensions" % (len(points), ndim))
|
||||
if len(points) != ndim and method == 'splinef2d':
|
||||
raise ValueError("The method splinef2d can only be used for "
|
||||
"scalar data with one point per coordinate")
|
||||
|
||||
grid, descending_dimensions = _check_points(points)
|
||||
_check_dimensionality(grid, values)
|
||||
|
||||
# sanity check requested xi
|
||||
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
|
||||
if xi.shape[-1] != len(grid):
|
||||
raise ValueError("The requested sample points xi have dimension "
|
||||
"%d, but this RegularGridInterpolator has "
|
||||
"dimension %d" % (xi.shape[-1], len(grid)))
|
||||
|
||||
if bounds_error:
|
||||
for i, p in enumerate(xi.T):
|
||||
if not np.logical_and(np.all(grid[i][0] <= p),
|
||||
np.all(p <= grid[i][-1])):
|
||||
raise ValueError("One of the requested xi is out of bounds "
|
||||
"in dimension %d" % i)
|
||||
|
||||
# perform interpolation
|
||||
if method in RegularGridInterpolator._ALL_METHODS:
|
||||
interp = RegularGridInterpolator(points, values, method=method,
|
||||
bounds_error=bounds_error,
|
||||
fill_value=fill_value)
|
||||
return interp(xi)
|
||||
elif method == "splinef2d":
|
||||
xi_shape = xi.shape
|
||||
xi = xi.reshape(-1, xi.shape[-1])
|
||||
|
||||
# RectBivariateSpline doesn't support fill_value; we need to wrap here
|
||||
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
|
||||
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
|
||||
axis=0)
|
||||
result = np.empty_like(xi[:, 0])
|
||||
|
||||
# make a copy of values for RectBivariateSpline
|
||||
interp = RectBivariateSpline(points[0], points[1], values[:])
|
||||
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
|
||||
result[np.logical_not(idx_valid)] = fill_value
|
||||
|
||||
return result.reshape(xi_shape[:-1])
|
||||
else:
|
||||
raise ValueError(f"unknown {method = }")
|
||||
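A minimal usage sketch of the `interpn` wrapper shown above, reusing the grid from its docstring example. The second, out-of-range query point and the `np.nan` fill value are illustrative assumptions, not part of the source; the sketch only shows how `bounds_error=False` routes out-of-domain points to `fill_value` instead of raising.

import numpy as np
from scipy.interpolate import interpn

def value_func_3d(x, y, z):
    # same toy function as in the docstring example above
    return 2 * x + 3 * y - z

points = (np.linspace(0, 4, 5), np.linspace(0, 5, 6), np.linspace(0, 6, 7))
values = value_func_3d(*np.meshgrid(*points, indexing='ij'))

# one in-range point and one point outside the grid along the first axis
xi = np.array([[2.21, 3.12, 1.15],
               [9.00, 3.12, 1.15]])

# with bounds_error=False the out-of-range point receives fill_value
# instead of triggering the out-of-bounds ValueError in the code above
out = interpn(points, values, xi, method="linear",
              bounds_error=False, fill_value=np.nan)
print(out)   # expected: [12.63, nan]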
@@ -0,0 +1,44 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'bispeu',
    'bispev',
    'curfit',
    'dblint',
    'fpchec',
    'fpcurf0',
    'fpcurf1',
    'fpcurfm1',
    'parcur',
    'parder',
    'pardeu',
    'pardtc',
    'percur',
    'regrid_smth',
    'regrid_smth_spher',
    'spalde',
    'spherfit_lsq',
    'spherfit_smth',
    'splder',
    'splev',
    'splint',
    'sproot',
    'surfit_lsq',
    'surfit_smth',
    'types',
]


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="dfitpack",
                                   private_modules=["_dfitpack"], all=__all__,
                                   attribute=name)
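The stub module above (and the ones that follow) all rely on the same mechanism: a module-level `__getattr__` (PEP 562) hands attribute lookups to SciPy's private `_sub_module_deprecation` helper, which is expected to warn and then forward to the private implementation module. A self-contained sketch of that general pattern follows; the names (`deprecated_shim`, `_private`, the warning text) are hypothetical illustrations, not SciPy's actual internals.

# deprecated_shim.py -- generic sketch of the module-level __getattr__ pattern
# used by the stubs above; all names here are hypothetical, not SciPy's.
import types
import warnings

__all__ = ['splev', 'splint']  # names the old module used to expose

# Stand-in for the private module that now holds the real implementations.
_private = types.SimpleNamespace(
    splev=lambda *a, **kw: "real splev would run here",
    splint=lambda *a, **kw: "real splint would run here",
)


def __dir__():
    return __all__


def __getattr__(name):  # PEP 562: called only for names not found normally
    if name not in __all__:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    warnings.warn(
        f"{name!r} is accessed through a deprecated module; "
        "use the public package namespace instead",
        DeprecationWarning, stacklevel=2,
    )
    return getattr(_private, name)

Importing such a shim and touching `shim.splev` would emit a `DeprecationWarning` and return the private implementation, which is the behaviour the stubs above delegate to `_sub_module_deprecation`.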
@@ -0,0 +1,31 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'BSpline',
    'bisplev',
    'bisplrep',
    'insert',
    'spalde',
    'splantider',
    'splder',
    'splev',
    'splint',
    'splprep',
    'splrep',
    'sproot',
]


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="fitpack",
                                   private_modules=["_fitpack_py"], all=__all__,
                                   attribute=name)
@@ -0,0 +1,29 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'BivariateSpline',
    'InterpolatedUnivariateSpline',
    'LSQBivariateSpline',
    'LSQSphereBivariateSpline',
    'LSQUnivariateSpline',
    'RectBivariateSpline',
    'RectSphereBivariateSpline',
    'SmoothBivariateSpline',
    'SmoothSphereBivariateSpline',
    'UnivariateSpline',
]


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="fitpack2",
                                   private_modules=["_fitpack2"], all=__all__,
                                   attribute=name)
@@ -0,0 +1,30 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'BPoly',
    'BSpline',
    'NdPPoly',
    'PPoly',
    'RectBivariateSpline',
    'RegularGridInterpolator',
    'interp1d',
    'interp2d',
    'interpn',
    'lagrange',
    'make_interp_spline',
]


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="interpolate",
                                   private_modules=["_interpolate", "fitpack2", "_rgi"],
                                   all=__all__, attribute=name)
@@ -0,0 +1,23 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'CloughTocher2DInterpolator',
    'LinearNDInterpolator',
    'NearestNDInterpolator',
    'griddata',
]


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="ndgriddata",
                                   private_modules=["_ndgriddata"], all=__all__,
                                   attribute=name)
@@ -0,0 +1,24 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'BarycentricInterpolator',
    'KroghInterpolator',
    'approximate_taylor_polynomial',
    'barycentric_interpolate',
    'krogh_interpolate',
]


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="polyint",
                                   private_modules=["_polyint"], all=__all__,
                                   attribute=name)
18
venv/lib/python3.12/site-packages/scipy/interpolate/rbf.py
Normal file
@@ -0,0 +1,18 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = ["Rbf"]  # noqa: F822


def __dir__():
    return __all__


def __getattr__(name):
    return _sub_module_deprecation(sub_package="interpolate", module="rbf",
                                   private_modules=["_rbf"], all=__all__,
                                   attribute=name)
@@ -0,0 +1,503 @@
|
||||
import itertools
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_equal, assert_allclose, assert_,
|
||||
assert_almost_equal, assert_array_almost_equal)
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
from scipy._lib._testutils import check_free_memory
|
||||
|
||||
from scipy.interpolate import RectBivariateSpline
|
||||
|
||||
from scipy.interpolate._fitpack_py import (splrep, splev, bisplrep, bisplev,
|
||||
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
|
||||
from scipy.interpolate._dfitpack import regrid_smth
|
||||
from scipy.interpolate._fitpack2 import dfitpack_int
|
||||
|
||||
|
||||
def data_file(basename):
|
||||
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
|
||||
'data', basename)
|
||||
|
||||
|
||||
def norm2(x):
|
||||
return np.sqrt(np.dot(x.T, x))
|
||||
|
||||
|
||||
def f1(x, d=0):
|
||||
"""Derivatives of sin->cos->-sin->-cos."""
|
||||
if d % 4 == 0:
|
||||
return np.sin(x)
|
||||
if d % 4 == 1:
|
||||
return np.cos(x)
|
||||
if d % 4 == 2:
|
||||
return -np.sin(x)
|
||||
if d % 4 == 3:
|
||||
return -np.cos(x)
|
||||
|
||||
|
||||
def makepairs(x, y):
|
||||
"""Helper function to create an array of pairs of x and y."""
|
||||
xy = np.array(list(itertools.product(np.asarray(x), np.asarray(y))))
|
||||
return xy.T
|
||||
|
||||
|
||||
class TestSmokeTests:
|
||||
"""
|
||||
Smoke tests (with a few asserts) for fitpack routines -- mostly
|
||||
check that they are runnable
|
||||
"""
|
||||
def check_1(self, per=0, s=0, a=0, b=2*np.pi, at_nodes=False,
|
||||
xb=None, xe=None):
|
||||
if xb is None:
|
||||
xb = a
|
||||
if xe is None:
|
||||
xe = b
|
||||
|
||||
N = 20
|
||||
# nodes and middle points of the nodes
|
||||
x = np.linspace(a, b, N + 1)
|
||||
x1 = a + (b - a) * np.arange(1, N, dtype=float) / float(N - 1)
|
||||
v = f1(x)
|
||||
|
||||
def err_est(k, d):
|
||||
# Assume f has all derivatives < 1
|
||||
h = 1.0 / N
|
||||
tol = 5 * h**(.75*(k-d))
|
||||
if s > 0:
|
||||
tol += 1e5*s
|
||||
return tol
|
||||
|
||||
for k in range(1, 6):
|
||||
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
|
||||
tt = tck[0][k:-k] if at_nodes else x1
|
||||
|
||||
for d in range(k+1):
|
||||
tol = err_est(k, d)
|
||||
err = norm2(f1(tt, d) - splev(tt, tck, d)) / norm2(f1(tt, d))
|
||||
assert err < tol
|
||||
|
||||
def check_2(self, per=0, N=20, ia=0, ib=2*np.pi):
|
||||
a, b, dx = 0, 2*np.pi, 0.2*np.pi
|
||||
x = np.linspace(a, b, N+1) # nodes
|
||||
v = np.sin(x)
|
||||
|
||||
def err_est(k, d):
|
||||
# Assume f has all derivatives < 1
|
||||
h = 1.0 / N
|
||||
tol = 5 * h**(.75*(k-d))
|
||||
return tol
|
||||
|
||||
nk = []
|
||||
for k in range(1, 6):
|
||||
tck = splrep(x, v, s=0, per=per, k=k, xe=b)
|
||||
nk.append([splint(ia, ib, tck), spalde(dx, tck)])
|
||||
|
||||
k = 1
|
||||
for r in nk:
|
||||
d = 0
|
||||
for dr in r[1]:
|
||||
tol = err_est(k, d)
|
||||
assert_allclose(dr, f1(dx, d), atol=0, rtol=tol)
|
||||
d = d+1
|
||||
k = k+1
|
||||
|
||||
def test_smoke_splrep_splev(self):
|
||||
self.check_1(s=1e-6)
|
||||
self.check_1(b=1.5*np.pi)
|
||||
self.check_1(b=1.5*np.pi, xe=2*np.pi, per=1, s=1e-1)
|
||||
|
||||
@pytest.mark.parametrize('per', [0, 1])
|
||||
@pytest.mark.parametrize('at_nodes', [True, False])
|
||||
def test_smoke_splrep_splev_2(self, per, at_nodes):
|
||||
self.check_1(per=per, at_nodes=at_nodes)
|
||||
|
||||
@pytest.mark.parametrize('N', [20, 50])
|
||||
@pytest.mark.parametrize('per', [0, 1])
|
||||
def test_smoke_splint_spalde(self, N, per):
|
||||
self.check_2(per=per, N=N)
|
||||
|
||||
@pytest.mark.parametrize('N', [20, 50])
|
||||
@pytest.mark.parametrize('per', [0, 1])
|
||||
def test_smoke_splint_spalde_iaib(self, N, per):
|
||||
self.check_2(ia=0.2*np.pi, ib=np.pi, N=N, per=per)
|
||||
|
||||
def test_smoke_sproot(self):
|
||||
# sproot is only implemented for k=3
|
||||
a, b = 0.1, 15
|
||||
x = np.linspace(a, b, 20)
|
||||
v = np.sin(x)
|
||||
|
||||
for k in [1, 2, 4, 5]:
|
||||
tck = splrep(x, v, s=0, per=0, k=k, xe=b)
|
||||
with assert_raises(ValueError):
|
||||
sproot(tck)
|
||||
|
||||
k = 3
|
||||
tck = splrep(x, v, s=0, k=3)
|
||||
roots = sproot(tck)
|
||||
assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
|
||||
assert_allclose(roots, np.pi * np.array([1, 2, 3, 4]), rtol=1e-3)
|
||||
|
||||
@pytest.mark.parametrize('N', [20, 50])
|
||||
@pytest.mark.parametrize('k', [1, 2, 3, 4, 5])
|
||||
def test_smoke_splprep_splrep_splev(self, N, k):
|
||||
a, b, dx = 0, 2.*np.pi, 0.2*np.pi
|
||||
x = np.linspace(a, b, N+1) # nodes
|
||||
v = np.sin(x)
|
||||
|
||||
tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
|
||||
uv = splev(dx, tckp)
|
||||
err1 = abs(uv[1] - np.sin(uv[0]))
|
||||
assert err1 < 1e-2
|
||||
|
||||
tck = splrep(x, v, s=0, per=0, k=k)
|
||||
err2 = abs(splev(uv[0], tck) - np.sin(uv[0]))
|
||||
assert err2 < 1e-2
|
||||
|
||||
# Derivatives of parametric cubic spline at u (first function)
|
||||
if k == 3:
|
||||
tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
|
||||
for d in range(1, k+1):
|
||||
uv = splev(dx, tckp, d)
|
||||
|
||||
def test_smoke_bisplrep_bisplev(self):
|
||||
xb, xe = 0, 2.*np.pi
|
||||
yb, ye = 0, 2.*np.pi
|
||||
kx, ky = 3, 3
|
||||
Nx, Ny = 20, 20
|
||||
|
||||
def f2(x, y):
|
||||
return np.sin(x+y)
|
||||
|
||||
x = np.linspace(xb, xe, Nx + 1)
|
||||
y = np.linspace(yb, ye, Ny + 1)
|
||||
xy = makepairs(x, y)
|
||||
tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
|
||||
|
||||
tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
|
||||
t2 = makepairs(tt[0], tt[1])
|
||||
v1 = bisplev(tt[0], tt[1], tck)
|
||||
v2 = f2(t2[0], t2[1])
|
||||
v2.shape = len(tt[0]), len(tt[1])
|
||||
|
||||
assert norm2(np.ravel(v1 - v2)) < 1e-2
|
||||
|
||||
|
||||
class TestSplev:
|
||||
def test_1d_shape(self):
|
||||
x = [1,2,3,4,5]
|
||||
y = [4,5,6,7,8]
|
||||
tck = splrep(x, y)
|
||||
z = splev([1], tck)
|
||||
assert_equal(z.shape, (1,))
|
||||
z = splev(1, tck)
|
||||
assert_equal(z.shape, ())
|
||||
|
||||
def test_2d_shape(self):
|
||||
x = [1, 2, 3, 4, 5]
|
||||
y = [4, 5, 6, 7, 8]
|
||||
tck = splrep(x, y)
|
||||
t = np.array([[1.0, 1.5, 2.0, 2.5],
|
||||
[3.0, 3.5, 4.0, 4.5]])
|
||||
z = splev(t, tck)
|
||||
z0 = splev(t[0], tck)
|
||||
z1 = splev(t[1], tck)
|
||||
assert_equal(z, np.vstack((z0, z1)))
|
||||
|
||||
def test_extrapolation_modes(self):
|
||||
# test extrapolation modes
|
||||
# * if ext=0, return the extrapolated value.
|
||||
# * if ext=1, return 0
|
||||
# * if ext=2, raise a ValueError
|
||||
# * if ext=3, return the boundary value.
|
||||
x = [1,2,3]
|
||||
y = [0,2,4]
|
||||
tck = splrep(x, y, k=1)
|
||||
|
||||
rstl = [[-2, 6], [0, 0], None, [0, 4]]
|
||||
for ext in (0, 1, 3):
|
||||
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
|
||||
|
||||
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
|
||||
|
||||
|
||||
class TestSplder:
|
||||
def setup_method(self):
|
||||
# non-uniform grid, just to make it sure
|
||||
x = np.linspace(0, 1, 100)**3
|
||||
y = np.sin(20 * x)
|
||||
self.spl = splrep(x, y)
|
||||
|
||||
# double check that knots are non-uniform
|
||||
assert_(np.ptp(np.diff(self.spl[0])) > 0)
|
||||
|
||||
def test_inverse(self):
|
||||
# Check that antiderivative + derivative is identity.
|
||||
for n in range(5):
|
||||
spl2 = splantider(self.spl, n)
|
||||
spl3 = splder(spl2, n)
|
||||
assert_allclose(self.spl[0], spl3[0])
|
||||
assert_allclose(self.spl[1], spl3[1])
|
||||
assert_equal(self.spl[2], spl3[2])
|
||||
|
||||
def test_splder_vs_splev(self):
|
||||
# Check derivative vs. FITPACK
|
||||
|
||||
for n in range(3+1):
|
||||
# Also extrapolation!
|
||||
xx = np.linspace(-1, 2, 2000)
|
||||
if n == 3:
|
||||
# ... except that FITPACK extrapolates strangely for
|
||||
# order 0, so let's not check that.
|
||||
xx = xx[(xx >= 0) & (xx <= 1)]
|
||||
|
||||
dy = splev(xx, self.spl, n)
|
||||
spl2 = splder(self.spl, n)
|
||||
dy2 = splev(xx, spl2)
|
||||
if n == 1:
|
||||
assert_allclose(dy, dy2, rtol=2e-6)
|
||||
else:
|
||||
assert_allclose(dy, dy2)
|
||||
|
||||
def test_splantider_vs_splint(self):
|
||||
# Check antiderivative vs. FITPACK
|
||||
spl2 = splantider(self.spl)
|
||||
|
||||
# no extrapolation, splint assumes function is zero outside
|
||||
# range
|
||||
xx = np.linspace(0, 1, 20)
|
||||
|
||||
for x1 in xx:
|
||||
for x2 in xx:
|
||||
y1 = splint(x1, x2, self.spl)
|
||||
y2 = splev(x2, spl2) - splev(x1, spl2)
|
||||
assert_allclose(y1, y2)
|
||||
|
||||
def test_order0_diff(self):
|
||||
assert_raises(ValueError, splder, self.spl, 4)
|
||||
|
||||
def test_kink(self):
|
||||
# Should refuse to differentiate splines with kinks
|
||||
|
||||
spl2 = insert(0.5, self.spl, m=2)
|
||||
splder(spl2, 2) # Should work
|
||||
assert_raises(ValueError, splder, spl2, 3)
|
||||
|
||||
spl2 = insert(0.5, self.spl, m=3)
|
||||
splder(spl2, 1) # Should work
|
||||
assert_raises(ValueError, splder, spl2, 2)
|
||||
|
||||
spl2 = insert(0.5, self.spl, m=4)
|
||||
assert_raises(ValueError, splder, spl2, 1)
|
||||
|
||||
def test_multidim(self):
|
||||
# c can have trailing dims
|
||||
for n in range(3):
|
||||
t, c, k = self.spl
|
||||
c2 = np.c_[c, c, c]
|
||||
c2 = np.dstack((c2, c2))
|
||||
|
||||
spl2 = splantider((t, c2, k), n)
|
||||
spl3 = splder(spl2, n)
|
||||
|
||||
assert_allclose(t, spl3[0])
|
||||
assert_allclose(c2, spl3[1])
|
||||
assert_equal(k, spl3[2])
|
||||
|
||||
|
||||
class TestSplint:
|
||||
def test_len_c(self):
|
||||
n, k = 7, 3
|
||||
x = np.arange(n)
|
||||
y = x**3
|
||||
t, c, k = splrep(x, y, s=0)
|
||||
|
||||
# note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
|
||||
assert len(t) == len(c) == n + 2*(k-1)
|
||||
|
||||
# integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
|
||||
res = splint(0, 6, (t, c, k))
|
||||
assert_allclose(res, 6**4 / 4, atol=1e-15)
|
||||
|
||||
# check that the coefficients past len(t) - k - 1 are ignored
|
||||
c0 = c.copy()
|
||||
c0[len(t)-k-1:] = np.nan
|
||||
res0 = splint(0, 6, (t, c0, k))
|
||||
assert_allclose(res0, 6**4 / 4, atol=1e-15)
|
||||
|
||||
# however, all other coefficients *are* used
|
||||
c0[6] = np.nan
|
||||
assert np.isnan(splint(0, 6, (t, c0, k)))
|
||||
|
||||
# check that the coefficient array can have length `len(t) - k - 1`
|
||||
c1 = c[:len(t) - k - 1]
|
||||
res1 = splint(0, 6, (t, c1, k))
|
||||
assert_allclose(res1, 6**4 / 4, atol=1e-15)
|
||||
|
||||
# however shorter c arrays raise. The error from f2py is a
|
||||
# `dftipack.error`, which is an Exception but not ValueError etc.
|
||||
with assert_raises(Exception, match=r">=n-k-1"):
|
||||
splint(0, 1, (np.ones(10), np.ones(5), 3))
|
||||
|
||||
|
||||
class TestBisplrep:
|
||||
def test_overflow(self):
|
||||
from numpy.lib.stride_tricks import as_strided
|
||||
if dfitpack_int.itemsize == 8:
|
||||
size = 1500000**2
|
||||
else:
|
||||
size = 400**2
|
||||
# Don't allocate a real array, as it's very big, but rely
|
||||
# on that it's not referenced
|
||||
x = as_strided(np.zeros(()), shape=(size,))
|
||||
assert_raises(OverflowError, bisplrep, x, x, x, w=x,
|
||||
xb=0, xe=1, yb=0, ye=1, s=0)
|
||||
|
||||
def test_regression_1310(self):
|
||||
# Regression test for gh-1310
|
||||
with np.load(data_file('bug-1310.npz')) as loaded_data:
|
||||
data = loaded_data['data']
|
||||
|
||||
# Shouldn't crash -- the input data triggers work array sizes
|
||||
# that caused previously some data to not be aligned on
|
||||
# sizeof(double) boundaries in memory, which made the Fortran
|
||||
# code to crash when compiled with -O3
|
||||
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
|
||||
full_output=True)
|
||||
|
||||
@pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
|
||||
def test_ilp64_bisplrep(self):
|
||||
check_free_memory(28000) # VM size, doesn't actually use the pages
|
||||
x = np.linspace(0, 1, 400)
|
||||
y = np.linspace(0, 1, 400)
|
||||
x, y = np.meshgrid(x, y)
|
||||
z = np.zeros_like(x)
|
||||
tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
|
||||
assert_allclose(bisplev(0.5, 0.5, tck), 0.0)
|
||||
|
||||
|
||||
def test_dblint():
|
||||
# Basic test to see it runs and gives the correct result on a trivial
|
||||
# problem. Note that `dblint` is not exposed in the interpolate namespace.
|
||||
x = np.linspace(0, 1)
|
||||
y = np.linspace(0, 1)
|
||||
xx, yy = np.meshgrid(x, y)
|
||||
rect = RectBivariateSpline(x, y, 4 * xx * yy)
|
||||
tck = list(rect.tck)
|
||||
tck.extend(rect.degrees)
|
||||
|
||||
assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
|
||||
assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
|
||||
assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
|
||||
assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
|
||||
|
||||
|
||||
def test_splev_der_k():
|
||||
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
|
||||
# for x outside of knot range
|
||||
|
||||
# test case from gh-2188
|
||||
tck = (np.array([0., 0., 2.5, 2.5]),
|
||||
np.array([-1.56679978, 2.43995873, 0., 0.]),
|
||||
1)
|
||||
t, c, k = tck
|
||||
x = np.array([-3, 0, 2.5, 3])
|
||||
|
||||
# an explicit form of the linear spline
|
||||
assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
|
||||
assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
|
||||
|
||||
# now check a random spline vs splder
|
||||
np.random.seed(1234)
|
||||
x = np.sort(np.random.random(30))
|
||||
y = np.random.random(30)
|
||||
t, c, k = splrep(x, y)
|
||||
|
||||
x = [t[0] - 1., t[-1] + 1.]
|
||||
tck2 = splder((t, c, k), k)
|
||||
assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
|
||||
|
||||
|
||||
def test_splprep_segfault():
|
||||
# regression test for gh-3847: splprep segfaults if knots are specified
|
||||
# for task=-1
|
||||
t = np.arange(0, 1.1, 0.1)
|
||||
x = np.sin(2*np.pi*t)
|
||||
y = np.cos(2*np.pi*t)
|
||||
tck, u = splprep([x, y], s=0)
|
||||
np.arange(0, 1.01, 0.01)
|
||||
|
||||
uknots = tck[0] # using the knots from the previous fitting
|
||||
tck, u = splprep([x, y], task=-1, t=uknots) # here is the crash
|
||||
|
||||
|
||||
def test_bisplev_integer_overflow():
|
||||
np.random.seed(1)
|
||||
|
||||
x = np.linspace(0, 1, 11)
|
||||
y = x
|
||||
z = np.random.randn(11, 11).ravel()
|
||||
kx = 1
|
||||
ky = 1
|
||||
|
||||
nx, tx, ny, ty, c, fp, ier = regrid_smth(
|
||||
x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
|
||||
tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
|
||||
|
||||
xp = np.zeros([2621440])
|
||||
yp = np.zeros([2621440])
|
||||
|
||||
assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
|
||||
|
||||
|
||||
@pytest.mark.xslow
|
||||
def test_gh_1766():
|
||||
# this should fail gracefully instead of segfaulting (int overflow)
|
||||
size = 22
|
||||
kx, ky = 3, 3
|
||||
def f2(x, y):
|
||||
return np.sin(x+y)
|
||||
|
||||
x = np.linspace(0, 10, size)
|
||||
y = np.linspace(50, 700, size)
|
||||
xy = makepairs(x, y)
|
||||
tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
|
||||
# the size value here can either segfault
|
||||
# or produce a MemoryError on main
|
||||
tx_ty_size = 500000
|
||||
tck[0] = np.arange(tx_ty_size)
|
||||
tck[1] = np.arange(tx_ty_size) * 4
|
||||
tt_0 = np.arange(50)
|
||||
tt_1 = np.arange(50) * 3
|
||||
with pytest.raises(MemoryError):
|
||||
bisplev(tt_0, tt_1, tck, 1, 1)
|
||||
|
||||
|
||||
def test_spalde_scalar_input():
|
||||
# Ticket #629
|
||||
x = np.linspace(0, 10)
|
||||
y = x**3
|
||||
tck = splrep(x, y, k=3, t=[5])
|
||||
res = spalde(np.float64(1), tck)
|
||||
des = np.array([1., 3., 6., 6.])
|
||||
assert_almost_equal(res, des)
|
||||
|
||||
|
||||
def test_spalde_nc():
|
||||
# regression test for https://github.com/scipy/scipy/issues/19002
|
||||
# here len(t) = 29 and len(c) = 25 (== len(t) - k - 1)
|
||||
x = np.asarray([-10., -9., -8., -7., -6., -5., -4., -3., -2.5, -2., -1.5,
|
||||
-1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 4., 5., 6.],
|
||||
dtype="float")
|
||||
t = [-10.0, -10.0, -10.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,
|
||||
-2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0,
|
||||
5.0, 6.0, 6.0, 6.0, 6.0]
|
||||
c = np.asarray([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
|
||||
0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
|
||||
k = 3
|
||||
|
||||
res = spalde(x, (t, c, k))
|
||||
res_splev = np.asarray([splev(x, (t, c, k), nu) for nu in range(4)])
|
||||
assert_allclose(res, res_splev.T, atol=1e-15)
|
||||
@@ -0,0 +1,65 @@
import itertools
import threading
import time

import numpy as np
from numpy.testing import assert_equal
import pytest
import scipy.interpolate


class TestGIL:
    """Check if the GIL is properly released by scipy.interpolate functions."""

    def setup_method(self):
        self.messages = []

    def log(self, message):
        self.messages.append(message)

    def make_worker_thread(self, target, args):
        log = self.log

        class WorkerThread(threading.Thread):
            def run(self):
                log('interpolation started')
                target(*args)
                log('interpolation complete')

        return WorkerThread()

    @pytest.mark.xslow
    @pytest.mark.xfail(reason='race conditions, may depend on system load')
    def test_rectbivariatespline(self):
        def generate_params(n_points):
            x = y = np.linspace(0, 1000, n_points)
            x_grid, y_grid = np.meshgrid(x, y)
            z = x_grid * y_grid
            return x, y, z

        def calibrate_delay(requested_time):
            for n_points in itertools.count(5000, 1000):
                args = generate_params(n_points)
                time_started = time.time()
                interpolate(*args)
                if time.time() - time_started > requested_time:
                    return args

        def interpolate(x, y, z):
            scipy.interpolate.RectBivariateSpline(x, y, z)

        args = calibrate_delay(requested_time=3)
        worker_thread = self.make_worker_thread(interpolate, args)
        worker_thread.start()
        for i in range(3):
            time.sleep(0.5)
            self.log('working')
        worker_thread.join()
        assert_equal(self.messages, [
            'interpolation started',
            'working',
            'working',
            'working',
            'interpolation complete',
        ])
@@ -0,0 +1,436 @@
|
||||
import os
|
||||
import sys
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
|
||||
suppress_warnings)
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy._lib._testutils import check_free_memory
|
||||
import scipy.interpolate.interpnd as interpnd
|
||||
import scipy.spatial._qhull as qhull
|
||||
|
||||
import pickle
|
||||
import threading
|
||||
|
||||
_IS_32BIT = (sys.maxsize < 2**32)
|
||||
|
||||
|
||||
def data_file(basename):
|
||||
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
|
||||
'data', basename)
|
||||
|
||||
|
||||
class TestLinearNDInterpolation:
|
||||
def test_smoketest(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
|
||||
yi = interpnd.LinearNDInterpolator(x, y)(x)
|
||||
assert_almost_equal(y, yi)
|
||||
|
||||
def test_smoketest_alternate(self):
|
||||
# Test at single points, alternate calling convention
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
|
||||
yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
|
||||
assert_almost_equal(y, yi)
|
||||
|
||||
def test_complex_smoketest(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
yi = interpnd.LinearNDInterpolator(x, y)(x)
|
||||
assert_almost_equal(y, yi)
|
||||
|
||||
def test_tri_input(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
tri = qhull.Delaunay(x)
|
||||
yi = interpnd.LinearNDInterpolator(tri, y)(x)
|
||||
assert_almost_equal(y, yi)
|
||||
|
||||
def test_square(self):
|
||||
# Test barycentric interpolation on a square against a manual
|
||||
# implementation
|
||||
|
||||
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.float64)
|
||||
values = np.array([1., 2., -3., 5.], dtype=np.float64)
|
||||
|
||||
# NB: assume triangles (0, 1, 3) and (1, 2, 3)
|
||||
#
|
||||
# 1----2
|
||||
# | \ |
|
||||
# | \ |
|
||||
# 0----3
|
||||
|
||||
def ip(x, y):
|
||||
t1 = (x + y <= 1)
|
||||
t2 = ~t1
|
||||
|
||||
x1 = x[t1]
|
||||
y1 = y[t1]
|
||||
|
||||
x2 = x[t2]
|
||||
y2 = y[t2]
|
||||
|
||||
z = 0*x
|
||||
|
||||
z[t1] = (values[0]*(1 - x1 - y1)
|
||||
+ values[1]*y1
|
||||
+ values[3]*x1)
|
||||
|
||||
z[t2] = (values[2]*(x2 + y2 - 1)
|
||||
+ values[1]*(1 - x2)
|
||||
+ values[3]*(1 - y2))
|
||||
return z
|
||||
|
||||
xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
|
||||
np.linspace(0, 1, 14)[None,:])
|
||||
xx = xx.ravel()
|
||||
yy = yy.ravel()
|
||||
|
||||
xi = np.array([xx, yy]).T.copy()
|
||||
zi = interpnd.LinearNDInterpolator(points, values)(xi)
|
||||
|
||||
assert_almost_equal(zi, ip(xx, yy))
|
||||
|
||||
def test_smoketest_rescale(self):
|
||||
# Test at single points
|
||||
x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
|
||||
yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
|
||||
assert_almost_equal(y, yi)
|
||||
|
||||
def test_square_rescale(self):
|
||||
# Test barycentric interpolation on a rectangle with rescaling
|
||||
# agaings the same implementation without rescaling
|
||||
|
||||
points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.float64)
|
||||
values = np.array([1., 2., -3., 5.], dtype=np.float64)
|
||||
|
||||
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
|
||||
np.linspace(0, 100, 14)[None,:])
|
||||
xx = xx.ravel()
|
||||
yy = yy.ravel()
|
||||
xi = np.array([xx, yy]).T.copy()
|
||||
zi = interpnd.LinearNDInterpolator(points, values)(xi)
|
||||
zi_rescaled = interpnd.LinearNDInterpolator(points, values,
|
||||
rescale=True)(xi)
|
||||
|
||||
assert_almost_equal(zi, zi_rescaled)
|
||||
|
||||
def test_tripoints_input_rescale(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
tri = qhull.Delaunay(x)
|
||||
yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
|
||||
yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
|
||||
rescale=True)(x)
|
||||
assert_almost_equal(yi, yi_rescale)
|
||||
|
||||
def test_tri_input_rescale(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
tri = qhull.Delaunay(x)
|
||||
match = ("Rescaling is not supported when passing a "
|
||||
"Delaunay triangulation as ``points``.")
|
||||
with pytest.raises(ValueError, match=match):
|
||||
interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
|
||||
|
||||
def test_pickle(self):
|
||||
# Test at single points
|
||||
np.random.seed(1234)
|
||||
x = np.random.rand(30, 2)
|
||||
y = np.random.rand(30) + 1j*np.random.rand(30)
|
||||
|
||||
ip = interpnd.LinearNDInterpolator(x, y)
|
||||
ip2 = pickle.loads(pickle.dumps(ip))
|
||||
|
||||
assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.skipif(_IS_32BIT, reason='it fails on 32-bit')
|
||||
def test_threading(self):
|
||||
# This test was taken from issue 8856
|
||||
# https://github.com/scipy/scipy/issues/8856
|
||||
check_free_memory(10000)
|
||||
|
||||
r_ticks = np.arange(0, 4200, 10)
|
||||
phi_ticks = np.arange(0, 4200, 10)
|
||||
r_grid, phi_grid = np.meshgrid(r_ticks, phi_ticks)
|
||||
|
||||
def do_interp(interpolator, slice_rows, slice_cols):
|
||||
grid_x, grid_y = np.mgrid[slice_rows, slice_cols]
|
||||
res = interpolator((grid_x, grid_y))
|
||||
return res
|
||||
|
||||
points = np.vstack((r_grid.ravel(), phi_grid.ravel())).T
|
||||
values = (r_grid * phi_grid).ravel()
|
||||
interpolator = interpnd.LinearNDInterpolator(points, values)
|
||||
|
||||
worker_thread_1 = threading.Thread(
|
||||
target=do_interp,
|
||||
args=(interpolator, slice(0, 2100), slice(0, 2100)))
|
||||
worker_thread_2 = threading.Thread(
|
||||
target=do_interp,
|
||||
args=(interpolator, slice(2100, 4200), slice(0, 2100)))
|
||||
worker_thread_3 = threading.Thread(
|
||||
target=do_interp,
|
||||
args=(interpolator, slice(0, 2100), slice(2100, 4200)))
|
||||
worker_thread_4 = threading.Thread(
|
||||
target=do_interp,
|
||||
args=(interpolator, slice(2100, 4200), slice(2100, 4200)))
|
||||
|
||||
worker_thread_1.start()
|
||||
worker_thread_2.start()
|
||||
worker_thread_3.start()
|
||||
worker_thread_4.start()
|
||||
|
||||
worker_thread_1.join()
|
||||
worker_thread_2.join()
|
||||
worker_thread_3.join()
|
||||
worker_thread_4.join()
|
||||
|
||||
|
||||
class TestEstimateGradients2DGlobal:
|
||||
def test_smoketest(self):
|
||||
x = np.array([(0, 0), (0, 2),
|
||||
(1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
|
||||
tri = qhull.Delaunay(x)
|
||||
|
||||
# Should be exact for linear functions, independent of triangulation
|
||||
|
||||
funcs = [
|
||||
(lambda x, y: 0*x + 1, (0, 0)),
|
||||
(lambda x, y: 0 + x, (1, 0)),
|
||||
(lambda x, y: -2 + y, (0, 1)),
|
||||
(lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
|
||||
]
|
||||
|
||||
for j, (func, grad) in enumerate(funcs):
|
||||
z = func(x[:,0], x[:,1])
|
||||
dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
|
||||
|
||||
assert_equal(dz.shape, (6, 2))
|
||||
assert_allclose(dz, np.array(grad)[None,:] + 0*dz,
|
||||
rtol=1e-5, atol=1e-5, err_msg="item %d" % j)
|
||||
|
||||
def test_regression_2359(self):
|
||||
# Check regression --- for certain point sets, gradient
|
||||
# estimation could end up in an infinite loop
|
||||
points = np.load(data_file('estimate_gradients_hang.npy'))
|
||||
values = np.random.rand(points.shape[0])
|
||||
tri = qhull.Delaunay(points)
|
||||
|
||||
# This should not hang
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(interpnd.GradientEstimationWarning,
|
||||
"Gradient estimation did not converge")
|
||||
interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
|
||||
|
||||
|
||||
class TestCloughTocher2DInterpolator:
|
||||
|
||||
def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False,
|
||||
rescale=False, **kw):
|
||||
np.random.seed(1234)
|
||||
if x is None:
|
||||
x = np.array([(0, 0), (0, 1),
|
||||
(1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
|
||||
(0.5, 0.2)],
|
||||
dtype=float)
|
||||
|
||||
if not alternate:
|
||||
ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
|
||||
tol=1e-6, rescale=rescale)
|
||||
else:
|
||||
ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
|
||||
func(x[:,0], x[:,1]),
|
||||
tol=1e-6, rescale=rescale)
|
||||
|
||||
p = np.random.rand(50, 2)
|
||||
|
||||
if not alternate:
|
||||
a = ip(p)
|
||||
else:
|
||||
a = ip(p[:,0], p[:,1])
|
||||
b = func(p[:,0], p[:,1])
|
||||
|
||||
try:
|
||||
assert_allclose(a, b, **kw)
|
||||
except AssertionError:
|
||||
print("_check_accuracy: abs(a-b):", abs(a - b))
|
||||
print("ip.grad:", ip.grad)
|
||||
raise
|
||||
|
||||
def test_linear_smoketest(self):
|
||||
# Should be exact for linear functions, independent of triangulation
|
||||
funcs = [
|
||||
lambda x, y: 0*x + 1,
|
||||
lambda x, y: 0 + x,
|
||||
lambda x, y: -2 + y,
|
||||
lambda x, y: 3 + 3*x + 14.15*y,
|
||||
]
|
||||
|
||||
for j, func in enumerate(funcs):
|
||||
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
|
||||
err_msg="Function %d" % j)
|
||||
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
|
||||
alternate=True,
|
||||
err_msg="Function (alternate) %d" % j)
|
||||
# check rescaling
|
||||
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
|
||||
err_msg="Function (rescaled) %d" % j, rescale=True)
|
||||
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
|
||||
alternate=True, rescale=True,
|
||||
err_msg="Function (alternate, rescaled) %d" % j)
|
||||
|
||||
def test_quadratic_smoketest(self):
|
||||
# Should be reasonably accurate for quadratic functions
|
||||
funcs = [
|
||||
lambda x, y: x**2,
|
||||
lambda x, y: y**2,
|
||||
lambda x, y: x**2 - y**2,
|
||||
lambda x, y: x*y,
|
||||
]
|
||||
|
||||
for j, func in enumerate(funcs):
|
||||
self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
|
||||
err_msg="Function %d" % j)
|
||||
self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
|
||||
err_msg="Function %d" % j, rescale=True)
|
||||
|
||||
def test_tri_input(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
tri = qhull.Delaunay(x)
|
||||
yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
|
||||
assert_almost_equal(y, yi)
|
||||
|
||||
def test_tri_input_rescale(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
tri = qhull.Delaunay(x)
|
||||
match = ("Rescaling is not supported when passing a "
|
||||
"Delaunay triangulation as ``points``.")
|
||||
with pytest.raises(ValueError, match=match):
|
||||
interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
|
||||
|
||||
def test_tripoints_input_rescale(self):
|
||||
# Test at single points
|
||||
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 3j*y
|
||||
|
||||
tri = qhull.Delaunay(x)
|
||||
yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
|
||||
yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
|
||||
assert_almost_equal(yi, yi_rescale)
|
||||
|
||||
@pytest.mark.fail_slow(2)
|
||||
def test_dense(self):
|
||||
# Should be more accurate for dense meshes
|
||||
funcs = [
|
||||
lambda x, y: x**2,
|
||||
lambda x, y: y**2,
|
||||
lambda x, y: x**2 - y**2,
|
||||
lambda x, y: x*y,
|
||||
lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
|
||||
]
|
||||
|
||||
np.random.seed(4321) # use a different seed than the check!
|
||||
grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
|
||||
np.random.rand(30*30, 2)]
|
||||
|
||||
for j, func in enumerate(funcs):
|
||||
self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
|
||||
err_msg="Function %d" % j)
|
||||
self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
|
||||
err_msg="Function %d" % j, rescale=True)
|
||||
|
||||
def test_wrong_ndim(self):
|
||||
x = np.random.randn(30, 3)
|
||||
y = np.random.randn(30)
|
||||
assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
|
||||
|
||||
def test_pickle(self):
|
||||
# Test at single points
|
||||
np.random.seed(1234)
|
||||
x = np.random.rand(30, 2)
|
||||
y = np.random.rand(30) + 1j*np.random.rand(30)
|
||||
|
||||
ip = interpnd.CloughTocher2DInterpolator(x, y)
|
||||
ip2 = pickle.loads(pickle.dumps(ip))
|
||||
|
||||
assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
|
||||
|
||||
def test_boundary_tri_symmetry(self):
|
||||
# Interpolation at neighbourless triangles should retain
|
||||
# symmetry with mirroring the triangle.
|
||||
|
||||
# Equilateral triangle
|
||||
points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
|
||||
values = np.array([1, 0, 0])
|
||||
|
||||
ip = interpnd.CloughTocher2DInterpolator(points, values)
|
||||
|
||||
# Set gradient to zero at vertices
|
||||
ip.grad[...] = 0
|
||||
|
||||
# Interpolation should be symmetric vs. bisector
|
||||
alpha = 0.3
|
||||
p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
|
||||
p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])
|
||||
|
||||
v1 = ip(p1)
|
||||
v2 = ip(p2)
|
||||
assert_allclose(v1, v2)
|
||||
|
||||
# ... and affine invariant
|
||||
np.random.seed(1)
|
||||
A = np.random.randn(2, 2)
|
||||
b = np.random.randn(2)
|
||||
|
||||
points = A.dot(points.T).T + b[None,:]
|
||||
p1 = A.dot(p1) + b
|
||||
p2 = A.dot(p2) + b
|
||||
|
||||
ip = interpnd.CloughTocher2DInterpolator(points, values)
|
||||
ip.grad[...] = 0
|
||||
|
||||
w1 = ip(p1)
|
||||
w2 = ip(p2)
|
||||
assert_allclose(w1, v1)
|
||||
assert_allclose(w2, v2)
|
||||
@@ -0,0 +1,284 @@
|
||||
import numpy as np
|
||||
from numpy.testing import assert_equal, assert_array_equal, assert_allclose
|
||||
import pytest
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from scipy.interpolate import (griddata, NearestNDInterpolator,
|
||||
LinearNDInterpolator,
|
||||
CloughTocher2DInterpolator)
|
||||
|
||||
|
||||
parametrize_interpolators = pytest.mark.parametrize(
|
||||
"interpolator", [NearestNDInterpolator, LinearNDInterpolator,
|
||||
CloughTocher2DInterpolator]
|
||||
)
|
||||
|
||||
class TestGriddata:
|
||||
def test_fill_value(self):
|
||||
x = [(0,0), (0,1), (1,0)]
|
||||
y = [1, 2, 3]
|
||||
|
||||
yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
|
||||
assert_array_equal(yi, [-1., -1, 1])
|
||||
|
||||
yi = griddata(x, y, [(1,1), (1,2), (0,0)])
|
||||
assert_array_equal(yi, [np.nan, np.nan, 1])
|
||||
|
||||
def test_alternative_call(self):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
|
||||
+ np.array([0,1])[None,:])
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
for rescale in (True, False):
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
|
||||
rescale=rescale)
|
||||
assert_allclose(y, yi, atol=1e-14, err_msg=msg)
|
||||
|
||||
def test_multivalue_2d(self):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
|
||||
+ np.array([0,1])[None,:])
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
for rescale in (True, False):
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata(x, y, x, method=method, rescale=rescale)
|
||||
assert_allclose(y, yi, atol=1e-14, err_msg=msg)
|
||||
|
||||
def test_multipoint_2d(self):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
|
||||
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
for rescale in (True, False):
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata(x, y, xi, method=method, rescale=rescale)
|
||||
|
||||
assert_equal(yi.shape, (5, 3), err_msg=msg)
|
||||
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
|
||||
atol=1e-14, err_msg=msg)
|
||||
|
||||
def test_complex_2d(self):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 2j*y[::-1]
|
||||
|
||||
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
for rescale in (True, False):
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata(x, y, xi, method=method, rescale=rescale)
|
||||
|
||||
assert_equal(yi.shape, (5, 3), err_msg=msg)
|
||||
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
|
||||
atol=1e-14, err_msg=msg)
|
||||
|
||||
def test_1d(self):
|
||||
x = np.array([1, 2.5, 3, 4.5, 5, 6])
|
||||
y = np.array([1, 2, 0, 3.9, 2, 1])
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
assert_allclose(griddata(x, y, x, method=method), y,
|
||||
err_msg=method, atol=1e-14)
|
||||
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
|
||||
err_msg=method, atol=1e-14)
|
||||
assert_allclose(griddata((x,), y, (x,), method=method), y,
|
||||
err_msg=method, atol=1e-14)
|
||||
|
||||
def test_1d_borders(self):
|
||||
# Test for nearest neighbor case with xi outside
|
||||
# the range of the values.
|
||||
x = np.array([1, 2.5, 3, 4.5, 5, 6])
|
||||
y = np.array([1, 2, 0, 3.9, 2, 1])
|
||||
xi = np.array([0.9, 6.5])
|
||||
yi_should = np.array([1.0, 1.0])
|
||||
|
||||
method = 'nearest'
|
||||
assert_allclose(griddata(x, y, xi,
|
||||
method=method), yi_should,
|
||||
err_msg=method,
|
||||
atol=1e-14)
|
||||
assert_allclose(griddata(x.reshape(6, 1), y, xi,
|
||||
method=method), yi_should,
|
||||
err_msg=method,
|
||||
atol=1e-14)
|
||||
assert_allclose(griddata((x, ), y, (xi, ),
|
||||
method=method), yi_should,
|
||||
err_msg=method,
|
||||
atol=1e-14)
|
||||
|
||||
def test_1d_unsorted(self):
|
||||
x = np.array([2.5, 1, 4.5, 5, 6, 3])
|
||||
y = np.array([1, 2, 0, 3.9, 2, 1])
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
assert_allclose(griddata(x, y, x, method=method), y,
|
||||
err_msg=method, atol=1e-10)
|
||||
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
|
||||
err_msg=method, atol=1e-10)
|
||||
assert_allclose(griddata((x,), y, (x,), method=method), y,
|
||||
err_msg=method, atol=1e-10)
|
||||
|
||||
def test_square_rescale_manual(self):
|
||||
points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.float64)
|
||||
points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)],
|
||||
dtype=np.float64)
|
||||
values = np.array([1., 2., -3., 5., 9.], dtype=np.float64)
|
||||
|
||||
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
|
||||
np.linspace(0, 100, 14)[None,:])
|
||||
xx = xx.ravel()
|
||||
yy = yy.ravel()
|
||||
xi = np.array([xx, yy]).T.copy()
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
msg = method
|
||||
zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
|
||||
method=method)
|
||||
zi_rescaled = griddata(points, values, xi, method=method,
|
||||
rescale=True)
|
||||
assert_allclose(zi, zi_rescaled, err_msg=msg,
|
||||
atol=1e-12)
|
||||
|
||||
def test_xi_1d(self):
|
||||
# Check that 1-D xi is interpreted as a coordinate
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 2j*y[::-1]
|
||||
|
||||
xi = np.array([0.5, 0.5])
|
||||
|
||||
for method in ('nearest', 'linear', 'cubic'):
|
||||
p1 = griddata(x, y, xi, method=method)
|
||||
p2 = griddata(x, y, xi[None,:], method=method)
|
||||
assert_allclose(p1, p2, err_msg=method)
|
||||
|
||||
xi1 = np.array([0.5])
|
||||
xi3 = np.array([0.5, 0.5, 0.5])
|
||||
assert_raises(ValueError, griddata, x, y, xi1,
|
||||
method=method)
|
||||
assert_raises(ValueError, griddata, x, y, xi3,
|
||||
method=method)
|
||||
|
||||
|
||||
class TestNearestNDInterpolator:
|
||||
def test_nearest_options(self):
|
||||
# smoke test that NearestNDInterpolator accept cKDTree options
|
||||
npts, nd = 4, 3
|
||||
x = np.arange(npts*nd).reshape((npts, nd))
|
||||
y = np.arange(npts)
|
||||
nndi = NearestNDInterpolator(x, y)
|
||||
|
||||
opts = {'balanced_tree': False, 'compact_nodes': False}
|
||||
nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
|
||||
assert_allclose(nndi(x), nndi_o(x), atol=1e-14)
|
||||
|
||||
def test_nearest_list_argument(self):
|
||||
nd = np.array([[0, 0, 0, 0, 1, 0, 1],
|
||||
[0, 0, 0, 0, 0, 1, 1],
|
||||
[0, 0, 0, 0, 1, 1, 2]])
|
||||
d = nd[:, 3:]
|
||||
|
||||
# z is np.array
|
||||
NI = NearestNDInterpolator((d[0], d[1]), d[2])
|
||||
assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
|
||||
|
||||
# z is list
|
||||
NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
|
||||
assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
|
||||
|
||||
def test_nearest_query_options(self):
|
||||
nd = np.array([[0, 0.5, 0, 1],
|
||||
[0, 0, 0.5, 1],
|
||||
[0, 1, 1, 2]])
|
||||
delta = 0.1
|
||||
query_points = [0 + delta, 1 + delta], [0 + delta, 1 + delta]
|
||||
|
||||
# case 1 - query max_dist is smaller than
|
||||
# the query points' nearest distance to nd.
|
||||
NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
|
||||
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
|
||||
assert_array_equal(NI(query_points, distance_upper_bound=distance_upper_bound),
|
||||
[np.nan, np.nan])
|
||||
|
||||
# case 2 - query p is inf, will return [0, 2]
|
||||
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
|
||||
p = np.inf
|
||||
assert_array_equal(
|
||||
NI(query_points, distance_upper_bound=distance_upper_bound, p=p),
|
||||
[0, 2]
|
||||
)
|
||||
|
||||
# case 3 - query max_dist is larger, so should return non np.nan
|
||||
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) + 1e-7
|
||||
assert_array_equal(
|
||||
NI(query_points, distance_upper_bound=distance_upper_bound),
|
||||
[0, 2]
|
||||
)
|
||||
|
||||
def test_nearest_query_valid_inputs(self):
|
||||
nd = np.array([[0, 1, 0, 1],
|
||||
[0, 0, 1, 1],
|
||||
[0, 1, 1, 2]])
|
||||
NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
|
||||
with assert_raises(TypeError):
|
||||
NI([0.5, 0.5], query_options="not a dictionary")
|
||||
|
||||
|
||||
class TestNDInterpolators:
|
||||
@parametrize_interpolators
|
||||
def test_broadcastable_input(self, interpolator):
|
||||
# input data
|
||||
np.random.seed(0)
|
||||
x = np.random.random(10)
|
||||
y = np.random.random(10)
|
||||
z = np.hypot(x, y)
|
||||
|
||||
# x-y grid for interpolation
|
||||
X = np.linspace(min(x), max(x))
|
||||
Y = np.linspace(min(y), max(y))
|
||||
X, Y = np.meshgrid(X, Y)
|
||||
XY = np.vstack((X.ravel(), Y.ravel())).T
|
||||
interp = interpolator(list(zip(x, y)), z)
|
||||
# single array input
|
||||
interp_points0 = interp(XY)
|
||||
# tuple input
|
||||
interp_points1 = interp((X, Y))
|
||||
interp_points2 = interp((X, 0.0))
|
||||
# broadcastable input
|
||||
interp_points3 = interp(X, Y)
|
||||
interp_points4 = interp(X, 0.0)
|
||||
|
||||
assert_equal(interp_points0.size ==
|
||||
interp_points1.size ==
|
||||
interp_points2.size ==
|
||||
interp_points3.size ==
|
||||
interp_points4.size, True)
|
||||
|
||||
@parametrize_interpolators
|
||||
def test_read_only(self, interpolator):
|
||||
# input data
|
||||
np.random.seed(0)
|
||||
xy = np.random.random((10, 2))
|
||||
x, y = xy[:, 0], xy[:, 1]
|
||||
z = np.hypot(x, y)
|
||||
|
||||
# interpolation points
|
||||
XY = np.random.random((50, 2))
|
||||
|
||||
xy.setflags(write=False)
|
||||
z.setflags(write=False)
|
||||
XY.setflags(write=False)
|
||||
|
||||
interp = interpolator(xy, z)
|
||||
interp(XY)
|
||||
@@ -0,0 +1,104 @@
from numpy.testing import (assert_array_equal, assert_array_almost_equal)
from scipy.interpolate import pade


def test_pade_trivial():
    nump, denomp = pade([1.0], 0)
    assert_array_equal(nump.c, [1.0])
    assert_array_equal(denomp.c, [1.0])

    nump, denomp = pade([1.0], 0, 0)
    assert_array_equal(nump.c, [1.0])
    assert_array_equal(denomp.c, [1.0])


def test_pade_4term_exp():
    # First four Taylor coefficients of exp(x).
    # Unlike poly1d, the first array element is the zero-order term.
    an = [1.0, 1.0, 0.5, 1.0/6]

    nump, denomp = pade(an, 0)
    assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
    assert_array_almost_equal(denomp.c, [1.0])

    nump, denomp = pade(an, 1)
    assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])

    nump, denomp = pade(an, 2)
    assert_array_almost_equal(nump.c, [1.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])

    nump, denomp = pade(an, 3)
    assert_array_almost_equal(nump.c, [1.0])
    assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])

    # Testing inclusion of optional parameter
    nump, denomp = pade(an, 0, 3)
    assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
    assert_array_almost_equal(denomp.c, [1.0])

    nump, denomp = pade(an, 1, 2)
    assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])

    nump, denomp = pade(an, 2, 1)
    assert_array_almost_equal(nump.c, [1.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])

    nump, denomp = pade(an, 3, 0)
    assert_array_almost_equal(nump.c, [1.0])
    assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])

    # Testing reducing array.
    nump, denomp = pade(an, 0, 2)
    assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
    assert_array_almost_equal(denomp.c, [1.0])

    nump, denomp = pade(an, 1, 1)
    assert_array_almost_equal(nump.c, [1.0/2, 1.0])
    assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])

    nump, denomp = pade(an, 2, 0)
    assert_array_almost_equal(nump.c, [1.0])
    assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])


def test_pade_ints():
    # Simple test sequences (one of ints, one of floats).
    an_int = [1, 2, 3, 4]
    an_flt = [1.0, 2.0, 3.0, 4.0]

    # Make sure integer arrays give the same result as float arrays with same values.
    for i in range(0, len(an_int)):
        for j in range(0, len(an_int) - i):

            # Create float and int pade approximation for given order.
            nump_int, denomp_int = pade(an_int, i, j)
            nump_flt, denomp_flt = pade(an_flt, i, j)

            # Check that they are the same.
            assert_array_equal(nump_int.c, nump_flt.c)
            assert_array_equal(denomp_int.c, denomp_flt.c)


def test_pade_complex():
    # Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
    # Variable x is parameter - these tests will work with any complex number.
    x = 0.2 + 0.6j
    an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
          -(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]

    nump, denomp = pade(an, 1, 1)
    assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
    assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])

    nump, denomp = pade(an, 1, 2)
    assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
    assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])

    nump, denomp = pade(an, 2, 2)
    assert_array_almost_equal(
        nump.c,
        [x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0]
    )
    assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
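The tests above drive `scipy.interpolate.pade`, which returns the numerator and denominator as `numpy.poly1d` objects; the approximant itself is their ratio. A short hedged sketch using the same exp(x) Taylor coefficients as `test_pade_4term_exp`; the evaluation point 0.1 is illustrative.

import numpy as np
from scipy.interpolate import pade

# Taylor coefficients of exp(x) about 0: 1 + x + x**2/2 + x**3/6
an = [1.0, 1.0, 0.5, 1.0 / 6]

# pade(an, 1) requests denominator order 1, so this is the [2/1] approximant.
nump, denomp = pade(an, 1)

x = 0.1                        # illustrative evaluation point
approx = nump(x) / denomp(x)   # poly1d objects are callable
print(approx, np.exp(x))       # the two values agree to several digits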
@@ -0,0 +1,941 @@
|
||||
import warnings
|
||||
import io
|
||||
import numpy as np
|
||||
|
||||
from numpy.testing import (
|
||||
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
|
||||
assert_allclose, assert_equal, assert_)
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy.interpolate import (
|
||||
KroghInterpolator, krogh_interpolate,
|
||||
BarycentricInterpolator, barycentric_interpolate,
|
||||
approximate_taylor_polynomial, CubicHermiteSpline, pchip,
|
||||
PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
|
||||
make_interp_spline)
|
||||
|
||||
|
||||
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
|
||||
extra_args={}):
|
||||
np.random.seed(1234)
|
||||
|
||||
x = [-1, 0, 1, 2, 3, 4]
|
||||
s = list(range(1, len(y_shape)+1))
|
||||
s.insert(axis % (len(y_shape)+1), 0)
|
||||
y = np.random.rand(*((6,) + y_shape)).transpose(s)
|
||||
|
||||
xi = np.zeros(x_shape)
|
||||
if interpolator_cls is CubicHermiteSpline:
|
||||
dydx = np.random.rand(*((6,) + y_shape)).transpose(s)
|
||||
yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
|
||||
else:
|
||||
yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
|
||||
|
||||
target_shape = ((deriv_shape or ()) + y.shape[:axis]
|
||||
+ x_shape + y.shape[axis:][1:])
|
||||
assert_equal(yi.shape, target_shape)
|
||||
|
||||
# check that it also works with lists
|
||||
if x_shape and y.size > 0:
|
||||
if interpolator_cls is CubicHermiteSpline:
|
||||
interpolator_cls(list(x), list(y), list(dydx), axis=axis,
|
||||
**extra_args)(list(xi))
|
||||
else:
|
||||
interpolator_cls(list(x), list(y), axis=axis,
|
||||
**extra_args)(list(xi))
|
||||
|
||||
# also check the values
|
||||
if xi.size > 0 and deriv_shape is None:
|
||||
bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
|
||||
yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
|
||||
yv = yv.reshape(bs_shape)
|
||||
|
||||
yi, y = np.broadcast_arrays(yi, yv)
|
||||
assert_allclose(yi, y)
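For reference, a small standalone illustration (not part of the test module) of the broadcasting rule that `check_shape` encodes: with the default `axis=0`, the output shape is the query shape followed by the trailing dimensions of `y`.

import numpy as np
from scipy.interpolate import KroghInterpolator

x = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
y = np.random.rand(6, 2, 5)              # 6 data points, values of shape (2, 5)
yi = KroghInterpolator(x, y)(np.zeros(3))
assert yi.shape == (3, 2, 5)             # query shape (3,) + value shape (2, 5)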
|
||||
|
||||
|
||||
SHAPES = [(), (0,), (1,), (6, 2, 5)]
|
||||
|
||||
|
||||
def test_shapes():
|
||||
|
||||
def spl_interp(x, y, axis):
|
||||
return make_interp_spline(x, y, axis=axis)
|
||||
|
||||
for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
|
||||
pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
|
||||
for s1 in SHAPES:
|
||||
for s2 in SHAPES:
|
||||
for axis in range(-len(s2), len(s2)):
|
||||
if ip != CubicSpline:
|
||||
check_shape(ip, s1, s2, None, axis)
|
||||
else:
|
||||
for bc in ['natural', 'clamped']:
|
||||
extra = {'bc_type': bc}
|
||||
check_shape(ip, s1, s2, None, axis, extra)
|
||||
|
||||
def test_derivs_shapes():
|
||||
for ip in [KroghInterpolator, BarycentricInterpolator]:
|
||||
def interpolator_derivs(x, y, axis=0):
|
||||
return ip(x, y, axis).derivatives
|
||||
|
||||
for s1 in SHAPES:
|
||||
for s2 in SHAPES:
|
||||
for axis in range(-len(s2), len(s2)):
|
||||
check_shape(interpolator_derivs, s1, s2, (6,), axis)
|
||||
|
||||
|
||||
def test_deriv_shapes():
|
||||
def krogh_deriv(x, y, axis=0):
|
||||
return KroghInterpolator(x, y, axis).derivative
|
||||
|
||||
def bary_deriv(x, y, axis=0):
|
||||
return BarycentricInterpolator(x, y, axis).derivative
|
||||
|
||||
def pchip_deriv(x, y, axis=0):
|
||||
return pchip(x, y, axis).derivative()
|
||||
|
||||
def pchip_deriv2(x, y, axis=0):
|
||||
return pchip(x, y, axis).derivative(2)
|
||||
|
||||
def pchip_antideriv(x, y, axis=0):
|
||||
return pchip(x, y, axis).antiderivative()
|
||||
|
||||
def pchip_antideriv2(x, y, axis=0):
|
||||
return pchip(x, y, axis).antiderivative(2)
|
||||
|
||||
def pchip_deriv_inplace(x, y, axis=0):
|
||||
class P(PchipInterpolator):
|
||||
def __call__(self, x):
|
||||
return PchipInterpolator.__call__(self, x, 1)
|
||||
pass
|
||||
return P(x, y, axis)
|
||||
|
||||
def akima_deriv(x, y, axis=0):
|
||||
return Akima1DInterpolator(x, y, axis).derivative()
|
||||
|
||||
def akima_antideriv(x, y, axis=0):
|
||||
return Akima1DInterpolator(x, y, axis).antiderivative()
|
||||
|
||||
def cspline_deriv(x, y, axis=0):
|
||||
return CubicSpline(x, y, axis).derivative()
|
||||
|
||||
def cspline_antideriv(x, y, axis=0):
|
||||
return CubicSpline(x, y, axis).antiderivative()
|
||||
|
||||
def bspl_deriv(x, y, axis=0):
|
||||
return make_interp_spline(x, y, axis=axis).derivative()
|
||||
|
||||
def bspl_antideriv(x, y, axis=0):
|
||||
return make_interp_spline(x, y, axis=axis).antiderivative()
|
||||
|
||||
for ip in [krogh_deriv, bary_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
|
||||
pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
|
||||
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
|
||||
for s1 in SHAPES:
|
||||
for s2 in SHAPES:
|
||||
for axis in range(-len(s2), len(s2)):
|
||||
check_shape(ip, s1, s2, (), axis)
|
||||
|
||||
|
||||
def test_complex():
|
||||
x = [1, 2, 3, 4]
|
||||
y = [1, 2, 1j, 3]
|
||||
|
||||
for ip in [KroghInterpolator, BarycentricInterpolator, CubicSpline]:
|
||||
p = ip(x, y)
|
||||
assert_allclose(y, p(x))
|
||||
|
||||
dydx = [0, -1j, 2, 3j]
|
||||
p = CubicHermiteSpline(x, y, dydx)
|
||||
assert_allclose(y, p(x))
|
||||
assert_allclose(dydx, p(x, 1))
|
||||
|
||||
|
||||
class TestKrogh:
|
||||
def setup_method(self):
|
||||
self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
|
||||
self.test_xs = np.linspace(-1,1,100)
|
||||
self.xs = np.linspace(-1,1,5)
|
||||
self.ys = self.true_poly(self.xs)
|
||||
|
||||
def test_lagrange(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
|
||||
|
||||
def test_scalar(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_almost_equal(self.true_poly(7),P(7))
|
||||
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
|
||||
|
||||
def test_derivatives(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
|
||||
D[i])
|
||||
|
||||
def test_low_derivatives(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
D = P.derivatives(self.test_xs,len(self.xs)+2)
|
||||
for i in range(D.shape[0]):
|
||||
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
|
||||
D[i])
|
||||
|
||||
def test_derivative(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
m = 10
|
||||
r = P.derivatives(self.test_xs,m)
|
||||
for i in range(m):
|
||||
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
|
||||
|
||||
def test_high_derivative(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
for i in range(len(self.xs), 2*len(self.xs)):
|
||||
assert_almost_equal(P.derivative(self.test_xs,i),
|
||||
np.zeros(len(self.test_xs)))
|
||||
|
||||
def test_ndim_derivatives(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = KroghInterpolator(self.xs, ys, axis=0)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
assert_allclose(D[i],
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1))
|
||||
|
||||
def test_ndim_derivative(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = KroghInterpolator(self.xs, ys, axis=0)
|
||||
for i in range(P.n):
|
||||
assert_allclose(P.derivative(self.test_xs, i),
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1))
|
||||
|
||||
def test_hermite(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
|
||||
|
||||
def test_vector(self):
|
||||
xs = [0, 1, 2]
|
||||
ys = np.array([[0,1],[1,0],[2,1]])
|
||||
P = KroghInterpolator(xs,ys)
|
||||
Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
|
||||
test_xs = np.linspace(-1,3,100)
|
||||
assert_almost_equal(P(test_xs),
|
||||
np.asarray([p(test_xs) for p in Pi]).T)
|
||||
assert_almost_equal(P.derivatives(test_xs),
|
||||
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
|
||||
(1,2,0)))
|
||||
|
||||
def test_empty(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_array_equal(P([]), [])
|
||||
|
||||
def test_shapes_scalarvalue(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_array_equal(np.shape(P(0)), ())
|
||||
assert_array_equal(np.shape(P(np.array(0))), ())
|
||||
assert_array_equal(np.shape(P([0])), (1,))
|
||||
assert_array_equal(np.shape(P([0,1])), (2,))
|
||||
|
||||
def test_shapes_scalarvalue_derivative(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
n = P.n
|
||||
assert_array_equal(np.shape(P.derivatives(0)), (n,))
|
||||
assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
|
||||
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
|
||||
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
|
||||
|
||||
def test_shapes_vectorvalue(self):
|
||||
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
|
||||
assert_array_equal(np.shape(P(0)), (3,))
|
||||
assert_array_equal(np.shape(P([0])), (1,3))
|
||||
assert_array_equal(np.shape(P([0,1])), (2,3))
|
||||
|
||||
def test_shapes_1d_vectorvalue(self):
|
||||
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
|
||||
assert_array_equal(np.shape(P(0)), (1,))
|
||||
assert_array_equal(np.shape(P([0])), (1,1))
|
||||
assert_array_equal(np.shape(P([0,1])), (2,1))
|
||||
|
||||
def test_shapes_vectorvalue_derivative(self):
|
||||
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
|
||||
n = P.n
|
||||
assert_array_equal(np.shape(P.derivatives(0)), (n,3))
|
||||
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
|
||||
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
|
||||
|
||||
def test_wrapper(self):
|
||||
P = KroghInterpolator(self.xs, self.ys)
|
||||
ki = krogh_interpolate
|
||||
assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
|
||||
assert_almost_equal(P.derivative(self.test_xs, 2),
|
||||
ki(self.xs, self.ys, self.test_xs, der=2))
|
||||
assert_almost_equal(P.derivatives(self.test_xs, 2),
|
||||
ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
|
||||
|
||||
def test_int_inputs(self):
|
||||
# Check input args are cast correctly to floats, gh-3669
|
||||
x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
|
||||
13104, 60000]
|
||||
offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
|
||||
-0.48002351, -0.34925329, -0.26503107,
|
||||
-0.13148093, -0.12988833, -0.12979296,
|
||||
-0.12973574, -0.08582937, 0.05])
|
||||
f = KroghInterpolator(x, offset_cdf)
|
||||
|
||||
assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
|
||||
0, atol=1e-10)
|
||||
|
||||
def test_derivatives_complex(self):
|
||||
# regression test for gh-7381: krogh.derivatives(0) fails for complex y
|
||||
x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
|
||||
func = KroghInterpolator(x, y)
|
||||
cmplx = func.derivatives(0)
|
||||
|
||||
cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
|
||||
1j*KroghInterpolator(x, y.imag).derivatives(0))
|
||||
assert_allclose(cmplx, cmplx2, atol=1e-15)
|
||||
|
||||
def test_high_degree_warning(self):
|
||||
with pytest.warns(UserWarning, match="40 degrees provided,"):
|
||||
KroghInterpolator(np.arange(40), np.ones(40))
|
||||
|
||||
|
||||
class TestTaylor:
|
||||
def test_exponential(self):
|
||||
degree = 5
|
||||
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
|
||||
for i in range(degree+1):
|
||||
assert_almost_equal(p(0),1)
|
||||
p = p.deriv()
|
||||
assert_almost_equal(p(0),0)
|
||||
|
||||
|
||||
class TestBarycentric:
|
||||
def setup_method(self):
|
||||
self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
|
||||
self.test_xs = np.linspace(-1, 1, 100)
|
||||
self.xs = np.linspace(-1, 1, 5)
|
||||
self.ys = self.true_poly(self.xs)
|
||||
|
||||
def test_lagrange(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
assert_allclose(P(self.test_xs), self.true_poly(self.test_xs))
|
||||
|
||||
def test_scalar(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
assert_allclose(P(7), self.true_poly(7))
|
||||
assert_allclose(P(np.array(7)), self.true_poly(np.array(7)))
|
||||
|
||||
def test_derivatives(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
assert_allclose(self.true_poly.deriv(i)(self.test_xs), D[i])
|
||||
|
||||
def test_low_derivatives(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
D = P.derivatives(self.test_xs, len(self.xs)+2)
|
||||
for i in range(D.shape[0]):
|
||||
assert_allclose(self.true_poly.deriv(i)(self.test_xs),
|
||||
D[i],
|
||||
atol=1e-12)
|
||||
|
||||
def test_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
m = 10
|
||||
r = P.derivatives(self.test_xs, m)
|
||||
for i in range(m):
|
||||
assert_allclose(P.derivative(self.test_xs, i), r[i])
|
||||
|
||||
def test_high_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
for i in range(len(self.xs), 5*len(self.xs)):
|
||||
assert_allclose(P.derivative(self.test_xs, i),
|
||||
np.zeros(len(self.test_xs)))
|
||||
|
||||
def test_ndim_derivatives(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = BarycentricInterpolator(self.xs, ys, axis=0)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
assert_allclose(D[i],
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1),
|
||||
atol=1e-12)
|
||||
|
||||
def test_ndim_derivative(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = BarycentricInterpolator(self.xs, ys, axis=0)
|
||||
for i in range(P.n):
|
||||
assert_allclose(P.derivative(self.test_xs, i),
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1),
|
||||
atol=1e-12)
|
||||
|
||||
def test_delayed(self):
|
||||
P = BarycentricInterpolator(self.xs)
|
||||
P.set_yi(self.ys)
|
||||
assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
|
||||
|
||||
def test_append(self):
|
||||
P = BarycentricInterpolator(self.xs[:3], self.ys[:3])
|
||||
P.add_xi(self.xs[3:], self.ys[3:])
|
||||
assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
|
||||
|
||||
def test_vector(self):
|
||||
xs = [0, 1, 2]
|
||||
ys = np.array([[0, 1], [1, 0], [2, 1]])
|
||||
BI = BarycentricInterpolator
|
||||
P = BI(xs, ys)
|
||||
Pi = [BI(xs, ys[:, i]) for i in range(ys.shape[1])]
|
||||
test_xs = np.linspace(-1, 3, 100)
|
||||
assert_almost_equal(P(test_xs),
|
||||
np.asarray([p(test_xs) for p in Pi]).T)
|
||||
|
||||
def test_shapes_scalarvalue(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
assert_array_equal(np.shape(P(0)), ())
|
||||
assert_array_equal(np.shape(P(np.array(0))), ())
|
||||
assert_array_equal(np.shape(P([0])), (1,))
|
||||
assert_array_equal(np.shape(P([0, 1])), (2,))
|
||||
|
||||
def test_shapes_scalarvalue_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs,self.ys)
|
||||
n = P.n
|
||||
assert_array_equal(np.shape(P.derivatives(0)), (n,))
|
||||
assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
|
||||
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
|
||||
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
|
||||
|
||||
def test_shapes_vectorvalue(self):
|
||||
P = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
|
||||
assert_array_equal(np.shape(P(0)), (3,))
|
||||
assert_array_equal(np.shape(P([0])), (1, 3))
|
||||
assert_array_equal(np.shape(P([0, 1])), (2, 3))
|
||||
|
||||
def test_shapes_1d_vectorvalue(self):
|
||||
P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
|
||||
assert_array_equal(np.shape(P(0)), (1,))
|
||||
assert_array_equal(np.shape(P([0])), (1, 1))
|
||||
assert_array_equal(np.shape(P([0,1])), (2, 1))
|
||||
|
||||
def test_shapes_vectorvalue_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
|
||||
n = P.n
|
||||
assert_array_equal(np.shape(P.derivatives(0)), (n,3))
|
||||
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
|
||||
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
|
||||
|
||||
def test_wrapper(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
bi = barycentric_interpolate
|
||||
assert_allclose(P(self.test_xs), bi(self.xs, self.ys, self.test_xs))
|
||||
assert_allclose(P.derivative(self.test_xs, 2),
|
||||
bi(self.xs, self.ys, self.test_xs, der=2))
|
||||
assert_allclose(P.derivatives(self.test_xs, 2),
|
||||
bi(self.xs, self.ys, self.test_xs, der=[0, 1]))
|
||||
|
||||
def test_int_input(self):
|
||||
x = 1000 * np.arange(1, 11) # np.prod(x[-1] - x[:-1]) overflows
|
||||
y = np.arange(1, 11)
|
||||
value = barycentric_interpolate(x, y, 1000 * 9.5)
|
||||
assert_almost_equal(value, 9.5)
|
||||
|
||||
def test_large_chebyshev(self):
|
||||
# The barycentric weights for Chebyshev points of the second kind are known
|
||||
# analytically. Naive calculation of the weights will fail
|
||||
# for large N because of numerical underflow and overflow. We test
|
||||
# correctness for large N against analytical Chebyshev weights.
|
||||
|
||||
# Without capacity scaling or permutation, n=800 fails,
|
||||
# With just capacity scaling, n=1097 fails
|
||||
# With both capacity scaling and random permutation, n=30000 succeeds
|
||||
n = 1100
|
||||
j = np.arange(n + 1).astype(np.float64)
|
||||
x = np.cos(j * np.pi / n)
|
||||
|
||||
# See page 506 of Berrut and Trefethen 2004 for this formula
|
||||
w = (-1) ** j
|
||||
w[0] *= 0.5
|
||||
w[-1] *= 0.5
|
||||
|
||||
P = BarycentricInterpolator(x)
|
||||
|
||||
# It's okay to have a constant scaling factor in the weights because it
|
||||
# cancels out in the evaluation of the polynomial.
|
||||
factor = P.wi[0]
|
||||
assert_almost_equal(P.wi / (2 * factor), w)
|
||||
|
||||
def test_warning(self):
|
||||
# Test if the divide-by-zero warning is properly ignored when computing
|
||||
# interpolated values at the interpolation points
|
||||
P = BarycentricInterpolator([0, 1], [1, 2])
|
||||
with np.errstate(divide='raise'):
|
||||
yi = P(P.xi)
|
||||
|
||||
# Check if the interpolated values match the input values
|
||||
# at the nodes
|
||||
assert_almost_equal(yi, P.yi.ravel())
|
||||
|
||||
def test_repeated_node(self):
|
||||
# check that a repeated node raises a ValueError
|
||||
# (computing the weights requires division by xi[i] - xi[j])
|
||||
xis = np.array([0.1, 0.5, 0.9, 0.5])
|
||||
ys = np.array([1, 2, 3, 4])
|
||||
with pytest.raises(ValueError,
|
||||
match="Interpolation points xi must be distinct."):
|
||||
BarycentricInterpolator(xis, ys)
|
||||
|
||||
|
||||
class TestPCHIP:
|
||||
def _make_random(self, npts=20):
|
||||
np.random.seed(1234)
|
||||
xi = np.sort(np.random.random(npts))
|
||||
yi = np.random.random(npts)
|
||||
return pchip(xi, yi), xi, yi
|
||||
|
||||
def test_overshoot(self):
|
||||
# PCHIP should not overshoot
|
||||
p, xi, yi = self._make_random()
|
||||
for i in range(len(xi)-1):
|
||||
x1, x2 = xi[i], xi[i+1]
|
||||
y1, y2 = yi[i], yi[i+1]
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
xp = np.linspace(x1, x2, 10)
|
||||
yp = p(xp)
|
||||
assert_(((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all())
|
||||
|
||||
def test_monotone(self):
|
||||
# PCHIP should preserve monotonicity
|
||||
p, xi, yi = self._make_random()
|
||||
for i in range(len(xi)-1):
|
||||
x1, x2 = xi[i], xi[i+1]
|
||||
y1, y2 = yi[i], yi[i+1]
|
||||
xp = np.linspace(x1, x2, 10)
|
||||
yp = p(xp)
|
||||
assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all())
|
||||
|
||||
def test_cast(self):
|
||||
# regression test for integer input data, see gh-3453
|
||||
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
|
||||
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
|
||||
xx = np.arange(100)
|
||||
curve = pchip(data[0], data[1])(xx)
|
||||
|
||||
data1 = data * 1.0
|
||||
curve1 = pchip(data1[0], data1[1])(xx)
|
||||
|
||||
assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
|
||||
|
||||
def test_nag(self):
|
||||
# Example from NAG C implementation,
|
||||
# http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
|
||||
# suggested in gh-5326 as a smoke test for the way the derivatives
|
||||
# are computed (see also gh-3453)
|
||||
dataStr = '''
|
||||
7.99 0.00000E+0
|
||||
8.09 0.27643E-4
|
||||
8.19 0.43750E-1
|
||||
8.70 0.16918E+0
|
||||
9.20 0.46943E+0
|
||||
10.00 0.94374E+0
|
||||
12.00 0.99864E+0
|
||||
15.00 0.99992E+0
|
||||
20.00 0.99999E+0
|
||||
'''
|
||||
data = np.loadtxt(io.StringIO(dataStr))
|
||||
pch = pchip(data[:,0], data[:,1])
|
||||
|
||||
resultStr = '''
|
||||
7.9900 0.0000
|
||||
9.1910 0.4640
|
||||
10.3920 0.9645
|
||||
11.5930 0.9965
|
||||
12.7940 0.9992
|
||||
13.9950 0.9998
|
||||
15.1960 0.9999
|
||||
16.3970 1.0000
|
||||
17.5980 1.0000
|
||||
18.7990 1.0000
|
||||
20.0000 1.0000
|
||||
'''
|
||||
result = np.loadtxt(io.StringIO(resultStr))
|
||||
assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
|
||||
|
||||
def test_endslopes(self):
|
||||
# this is a smoke test for gh-3453: PCHIP interpolator should not
|
||||
# set edge slopes to zero if the data do not suggest zero edge derivatives
|
||||
x = np.array([0.0, 0.1, 0.25, 0.35])
|
||||
y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
|
||||
y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
|
||||
for pp in (pchip(x, y1), pchip(x, y2)):
|
||||
for t in (x[0], x[-1]):
|
||||
assert_(pp(t, 1) != 0)
|
||||
|
||||
def test_all_zeros(self):
|
||||
x = np.arange(10)
|
||||
y = np.zeros_like(x)
|
||||
|
||||
# this should work and not generate any warnings
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings('error')
|
||||
pch = pchip(x, y)
|
||||
|
||||
xx = np.linspace(0, 9, 101)
|
||||
assert_equal(pch(xx), 0.)
|
||||
|
||||
def test_two_points(self):
|
||||
# regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
|
||||
# it tries to use a three-point scheme to estimate edge derivatives,
|
||||
# while there are only two points available.
|
||||
# Instead, it should construct a linear interpolator.
|
||||
x = np.linspace(0, 1, 11)
|
||||
p = pchip([0, 1], [0, 2])
|
||||
assert_allclose(p(x), 2*x, atol=1e-15)
|
||||
|
||||
def test_pchip_interpolate(self):
|
||||
assert_array_almost_equal(
|
||||
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
|
||||
[1.])
|
||||
|
||||
assert_array_almost_equal(
|
||||
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
|
||||
[3.5])
|
||||
|
||||
assert_array_almost_equal(
|
||||
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
|
||||
[[3.5], [1]])
|
||||
|
||||
def test_roots(self):
|
||||
# regression test for gh-6357: .roots method should work
|
||||
p = pchip([0, 1], [-1, 1])
|
||||
r = p.roots()
|
||||
assert_allclose(r, 0.5)
|
||||
|
||||
|
||||
class TestCubicSpline:
|
||||
@staticmethod
|
||||
def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
|
||||
tol=1e-14):
|
||||
"""Check that spline coefficients satisfy the continuity and boundary
|
||||
conditions."""
|
||||
x = S.x
|
||||
c = S.c
|
||||
dx = np.diff(x)
|
||||
dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
|
||||
dxi = dx[:-1]
|
||||
|
||||
# Check C2 continuity.
|
||||
assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
|
||||
c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
|
||||
assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
|
||||
2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
|
||||
assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
|
||||
rtol=tol, atol=tol)
|
||||
|
||||
# Check that we found a parabola, the third derivative is 0.
|
||||
if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
|
||||
assert_allclose(c[0], 0, rtol=tol, atol=tol)
|
||||
return
|
||||
|
||||
# Check periodic boundary conditions.
|
||||
if bc_start == 'periodic':
|
||||
assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
|
||||
assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
|
||||
assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
|
||||
return
|
||||
|
||||
# Check other boundary conditions.
|
||||
if bc_start == 'not-a-knot':
|
||||
if x.size == 2:
|
||||
slope = (S(x[1]) - S(x[0])) / dx[0]
|
||||
assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
|
||||
else:
|
||||
assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
|
||||
elif bc_start == 'clamped':
|
||||
assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
|
||||
elif bc_start == 'natural':
|
||||
assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
|
||||
else:
|
||||
order, value = bc_start
|
||||
assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)
|
||||
|
||||
if bc_end == 'not-a-knot':
|
||||
if x.size == 2:
|
||||
slope = (S(x[1]) - S(x[0])) / dx[0]
|
||||
assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
|
||||
else:
|
||||
assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
|
||||
elif bc_end == 'clamped':
|
||||
assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
|
||||
elif bc_end == 'natural':
|
||||
assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)
|
||||
else:
|
||||
order, value = bc_end
|
||||
assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
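The coefficient algebra in `check_correctness` relies on the PPoly layout used by CubicSpline: `c` has shape (4, n-1, ...), highest power first, expressed in the local variable t = x - x[i]. A small sketch of that convention (an illustration, not part of the test class):

import numpy as np
from scipy.interpolate import CubicSpline

S = CubicSpline([0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 0.0, 1.0])
i, t = 1, 0.3                                    # evaluation point x = S.x[i] + t = 1.3
manual = sum(S.c[k, i] * t**(3 - k) for k in range(4))
assert np.isclose(manual, S(1.3))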
|
||||
|
||||
def check_all_bc(self, x, y, axis):
|
||||
deriv_shape = list(y.shape)
|
||||
del deriv_shape[axis]
|
||||
first_deriv = np.empty(deriv_shape)
|
||||
first_deriv.fill(2)
|
||||
second_deriv = np.empty(deriv_shape)
|
||||
second_deriv.fill(-1)
|
||||
bc_all = [
|
||||
'not-a-knot',
|
||||
'natural',
|
||||
'clamped',
|
||||
(1, first_deriv),
|
||||
(2, second_deriv)
|
||||
]
|
||||
for bc in bc_all[:3]:
|
||||
S = CubicSpline(x, y, axis=axis, bc_type=bc)
|
||||
self.check_correctness(S, bc, bc)
|
||||
|
||||
for bc_start in bc_all:
|
||||
for bc_end in bc_all:
|
||||
S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
|
||||
self.check_correctness(S, bc_start, bc_end, tol=2e-14)
|
||||
|
||||
def test_general(self):
|
||||
x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
|
||||
y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
|
||||
for n in [2, 3, x.size]:
|
||||
self.check_all_bc(x[:n], y[:n], 0)
|
||||
|
||||
Y = np.empty((2, n, 2))
|
||||
Y[0, :, 0] = y[:n]
|
||||
Y[0, :, 1] = y[:n] - 1
|
||||
Y[1, :, 0] = y[:n] + 2
|
||||
Y[1, :, 1] = y[:n] + 3
|
||||
self.check_all_bc(x[:n], Y, 1)
|
||||
|
||||
def test_periodic(self):
|
||||
for n in [2, 3, 5]:
|
||||
x = np.linspace(0, 2 * np.pi, n)
|
||||
y = np.cos(x)
|
||||
S = CubicSpline(x, y, bc_type='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
|
||||
Y = np.empty((2, n, 2))
|
||||
Y[0, :, 0] = y
|
||||
Y[0, :, 1] = y + 2
|
||||
Y[1, :, 0] = y - 1
|
||||
Y[1, :, 1] = y + 5
|
||||
S = CubicSpline(x, Y, axis=1, bc_type='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
|
||||
def test_periodic_eval(self):
|
||||
x = np.linspace(0, 2 * np.pi, 10)
|
||||
y = np.cos(x)
|
||||
S = CubicSpline(x, y, bc_type='periodic')
|
||||
assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
|
||||
|
||||
def test_second_derivative_continuity_gh_11758(self):
|
||||
# gh-11758: C2 continuity failure
|
||||
x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
|
||||
7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
|
||||
y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
|
||||
2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
|
||||
S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
|
||||
def test_three_points(self):
|
||||
# gh-11758: Fails computing a_m2_m1
|
||||
# In this case, s (first derivatives) could be found manually by solving
|
||||
# a system of 2 linear equations. From the solution of this system,
|
||||
# s[i] = (h1*m2 + h2*m1) / (h1 + h2), where h1 = x[1] - x[0], h2 = x[2] - x[1],
|
||||
# m1 = (y[1] - y[0]) / h1, m2 = (y[2] - y[1]) / h2
|
||||
x = np.array([1.0, 2.75, 3.0])
|
||||
y = np.array([1.0, 15.0, 1.0])
|
||||
S = CubicSpline(x, y, bc_type='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
assert_allclose(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))
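The asserted value of -48 follows directly from the closed form quoted in the comment; as a quick arithmetic check (a side note, not part of the scipy test file):

h1, h2 = 2.75 - 1.0, 3.0 - 2.75                  # 1.75 and 0.25
m1, m2 = (15.0 - 1.0) / h1, (1.0 - 15.0) / h2    # 8.0 and -56.0
s = (h1 * m2 + h2 * m1) / (h1 + h2)              # (-98 + 2) / 2
assert s == -48.0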
|
||||
|
||||
def test_periodic_three_points_multidim(self):
|
||||
# make sure one multidimensional interpolator does the same as multiple
|
||||
# one-dimensional interpolators
|
||||
x = np.array([0.0, 1.0, 3.0])
|
||||
y = np.array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
|
||||
S = CubicSpline(x, y, bc_type="periodic")
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
S0 = CubicSpline(x, y[:, 0], bc_type="periodic")
|
||||
S1 = CubicSpline(x, y[:, 1], bc_type="periodic")
|
||||
q = np.linspace(0, 2, 5)
|
||||
assert_allclose(S(q)[:, 0], S0(q))
|
||||
assert_allclose(S(q)[:, 1], S1(q))
|
||||
|
||||
def test_dtypes(self):
|
||||
x = np.array([0, 1, 2, 3], dtype=int)
|
||||
y = np.array([-5, 2, 3, 1], dtype=int)
|
||||
S = CubicSpline(x, y)
|
||||
self.check_correctness(S)
|
||||
|
||||
y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
|
||||
S = CubicSpline(x, y)
|
||||
self.check_correctness(S)
|
||||
|
||||
S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
|
||||
self.check_correctness(S, "natural", (1, 2j))
|
||||
|
||||
y = np.array([-5, 2, 3, 1])
|
||||
S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
|
||||
self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
|
||||
|
||||
def test_small_dx(self):
|
||||
rng = np.random.RandomState(0)
|
||||
x = np.sort(rng.uniform(size=100))
|
||||
y = 1e4 + rng.uniform(size=100)
|
||||
S = CubicSpline(x, y)
|
||||
self.check_correctness(S, tol=1e-13)
|
||||
|
||||
def test_incorrect_inputs(self):
|
||||
x = np.array([1, 2, 3, 4])
|
||||
y = np.array([1, 2, 3, 4])
|
||||
xc = np.array([1 + 1j, 2, 3, 4])
|
||||
xn = np.array([np.nan, 2, 3, 4])
|
||||
xo = np.array([2, 1, 3, 4])
|
||||
yn = np.array([np.nan, 2, 3, 4])
|
||||
y3 = [1, 2, 3]
|
||||
x1 = [1]
|
||||
y1 = [1]
|
||||
|
||||
assert_raises(ValueError, CubicSpline, xc, y)
|
||||
assert_raises(ValueError, CubicSpline, xn, y)
|
||||
assert_raises(ValueError, CubicSpline, x, yn)
|
||||
assert_raises(ValueError, CubicSpline, xo, y)
|
||||
assert_raises(ValueError, CubicSpline, x, y3)
|
||||
assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
|
||||
assert_raises(ValueError, CubicSpline, x1, y1)
|
||||
|
||||
wrong_bc = [('periodic', 'clamped'),
|
||||
((2, 0), (3, 10)),
|
||||
((1, 0), ),
|
||||
(0., 0.),
|
||||
'not-a-typo']
|
||||
|
||||
for bc_type in wrong_bc:
|
||||
assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
|
||||
|
||||
# Shapes mismatch when giving arbitrary derivative values:
|
||||
Y = np.c_[y, y]
|
||||
bc1 = ('clamped', (1, 0))
|
||||
bc2 = ('clamped', (1, [0, 0, 0]))
|
||||
bc3 = ('clamped', (1, [[0, 0]]))
|
||||
assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
|
||||
assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
|
||||
assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
|
||||
|
||||
# For the periodic condition, y[-1] must be equal to y[0]:
|
||||
assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
|
||||
|
||||
|
||||
def test_CubicHermiteSpline_correctness():
|
||||
x = [0, 2, 7]
|
||||
y = [-1, 2, 3]
|
||||
dydx = [0, 3, 7]
|
||||
s = CubicHermiteSpline(x, y, dydx)
|
||||
assert_allclose(s(x), y, rtol=1e-15)
|
||||
assert_allclose(s(x, 1), dydx, rtol=1e-15)
|
||||
|
||||
|
||||
def test_CubicHermiteSpline_error_handling():
|
||||
x = [1, 2, 3]
|
||||
y = [0, 3, 5]
|
||||
dydx = [1, -1, 2, 3]
|
||||
assert_raises(ValueError, CubicHermiteSpline, x, y, dydx)
|
||||
|
||||
dydx_with_nan = [1, 0, np.nan]
|
||||
assert_raises(ValueError, CubicHermiteSpline, x, y, dydx_with_nan)
|
||||
|
||||
|
||||
def test_roots_extrapolate_gh_11185():
|
||||
x = np.array([0.001, 0.002])
|
||||
y = np.array([1.66066935e-06, 1.10410807e-06])
|
||||
dy = np.array([-1.60061854, -1.600619])
|
||||
p = CubicHermiteSpline(x, y, dy)
|
||||
|
||||
# roots(extrapolate=True) for a polynomial with a single interval
|
||||
# should return all three real roots
|
||||
r = p.roots(extrapolate=True)
|
||||
assert_equal(p.c.shape[1], 1)
|
||||
assert_equal(r.size, 3)
|
||||
|
||||
|
||||
class TestZeroSizeArrays:
|
||||
# regression tests for gh-17241: CubicSpline et al. must not segfault
|
||||
# when y.size == 0
|
||||
# The two methods below are _almost_ the same, but not quite:
|
||||
# one is for objects which have the `bc_type` argument (CubicSpline)
|
||||
# and the other one is for those which do not (Pchip, Akima1D)
|
||||
|
||||
@pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
|
||||
np.zeros((10, 5, 0))])
|
||||
@pytest.mark.parametrize('bc_type',
|
||||
['not-a-knot', 'periodic', 'natural', 'clamped'])
|
||||
@pytest.mark.parametrize('axis', [0, 1, 2])
|
||||
@pytest.mark.parametrize('cls', [make_interp_spline, CubicSpline])
|
||||
def test_zero_size(self, cls, y, bc_type, axis):
|
||||
x = np.arange(10)
|
||||
xval = np.arange(3)
|
||||
|
||||
obj = cls(x, y, bc_type=bc_type)
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == xval.shape + y.shape[1:]
|
||||
|
||||
# Also check with an explicit non-default axis
|
||||
yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
|
||||
|
||||
obj = cls(x, yt, bc_type=bc_type, axis=axis)
|
||||
sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == sh
|
||||
|
||||
@pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
|
||||
np.zeros((10, 5, 0))])
|
||||
@pytest.mark.parametrize('axis', [0, 1, 2])
|
||||
@pytest.mark.parametrize('cls', [PchipInterpolator, Akima1DInterpolator])
|
||||
def test_zero_size_2(self, cls, y, axis):
|
||||
x = np.arange(10)
|
||||
xval = np.arange(3)
|
||||
|
||||
obj = cls(x, y)
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == xval.shape + y.shape[1:]
|
||||
|
||||
# Also check with an explicit non-default axis
|
||||
yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
|
||||
|
||||
obj = cls(x, yt, axis=axis)
|
||||
sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == sh
|
||||
@@ -0,0 +1,222 @@
|
||||
# Created by John Travers, Robert Hetland, 2007
|
||||
""" Test functions for rbf module """
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_, assert_array_almost_equal,
|
||||
assert_almost_equal)
|
||||
from numpy import linspace, sin, cos, random, exp, allclose
|
||||
from scipy.interpolate._rbf import Rbf
|
||||
|
||||
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
|
||||
'cubic', 'quintic', 'thin-plate', 'linear')
|
||||
|
||||
|
||||
def check_rbf1d_interpolation(function):
|
||||
# Check that the Rbf function interpolates through the nodes (1D)
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y, function=function)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
assert_almost_equal(rbf(float(x[0])), y[0])
|
||||
|
||||
|
||||
def check_rbf2d_interpolation(function):
|
||||
# Check that the Rbf function interpolates through the nodes (2D).
|
||||
x = random.rand(50,1)*4-2
|
||||
y = random.rand(50,1)*4-2
|
||||
z = x*exp(-x**2-1j*y**2)
|
||||
rbf = Rbf(x, y, z, epsilon=2, function=function)
|
||||
zi = rbf(x, y)
|
||||
zi.shape = x.shape
|
||||
assert_array_almost_equal(z, zi)
|
||||
|
||||
|
||||
def check_rbf3d_interpolation(function):
|
||||
# Check that the Rbf function interpolates through the nodes (3D).
|
||||
x = random.rand(50, 1)*4 - 2
|
||||
y = random.rand(50, 1)*4 - 2
|
||||
z = random.rand(50, 1)*4 - 2
|
||||
d = x*exp(-x**2 - y**2)
|
||||
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
|
||||
di = rbf(x, y, z)
|
||||
di.shape = x.shape
|
||||
assert_array_almost_equal(di, d)
|
||||
|
||||
|
||||
def test_rbf_interpolation():
|
||||
for function in FUNCTIONS:
|
||||
check_rbf1d_interpolation(function)
|
||||
check_rbf2d_interpolation(function)
|
||||
check_rbf3d_interpolation(function)
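For orientation, a minimal usage sketch of the legacy `Rbf` class exercised by these checks (not part of the test file; the kernel name and tolerance are chosen for illustration):

import numpy as np
from scipy.interpolate import Rbf

x = np.linspace(0, 10, 9)
y = np.sin(x)
rbf = Rbf(x, y, function='thin-plate')    # callable interpolant
xi = np.linspace(0, 10, 101)
yi = rbf(xi)                              # smooth curve through the 9 nodes
assert np.allclose(rbf(x), y, atol=1e-6)  # exact (to round-off) at the nodes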
|
||||
|
||||
|
||||
def check_2drbf1d_interpolation(function):
|
||||
# Check that the 2-D Rbf function interpolates through the nodes (1D)
|
||||
x = linspace(0, 10, 9)
|
||||
y0 = sin(x)
|
||||
y1 = cos(x)
|
||||
y = np.vstack([y0, y1]).T
|
||||
rbf = Rbf(x, y, function=function, mode='N-D')
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
assert_almost_equal(rbf(float(x[0])), y[0])
|
||||
|
||||
|
||||
def check_2drbf2d_interpolation(function):
|
||||
# Check that the 2-D Rbf function interpolates through the nodes (2D).
|
||||
x = random.rand(50, ) * 4 - 2
|
||||
y = random.rand(50, ) * 4 - 2
|
||||
z0 = x * exp(-x ** 2 - 1j * y ** 2)
|
||||
z1 = y * exp(-y ** 2 - 1j * x ** 2)
|
||||
z = np.vstack([z0, z1]).T
|
||||
rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
|
||||
zi = rbf(x, y)
|
||||
zi.shape = z.shape
|
||||
assert_array_almost_equal(z, zi)
|
||||
|
||||
|
||||
def check_2drbf3d_interpolation(function):
|
||||
# Check that the 2-D Rbf function interpolates through the nodes (3D).
|
||||
x = random.rand(50, ) * 4 - 2
|
||||
y = random.rand(50, ) * 4 - 2
|
||||
z = random.rand(50, ) * 4 - 2
|
||||
d0 = x * exp(-x ** 2 - y ** 2)
|
||||
d1 = y * exp(-y ** 2 - x ** 2)
|
||||
d = np.vstack([d0, d1]).T
|
||||
rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
|
||||
di = rbf(x, y, z)
|
||||
di.shape = d.shape
|
||||
assert_array_almost_equal(di, d)
|
||||
|
||||
|
||||
def test_2drbf_interpolation():
|
||||
for function in FUNCTIONS:
|
||||
check_2drbf1d_interpolation(function)
|
||||
check_2drbf2d_interpolation(function)
|
||||
check_2drbf3d_interpolation(function)
|
||||
|
||||
|
||||
def check_rbf1d_regularity(function, atol):
|
||||
# Check that the Rbf function approximates a smooth function well away
|
||||
# from the nodes.
|
||||
x = linspace(0, 10, 9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y, function=function)
|
||||
xi = linspace(0, 10, 100)
|
||||
yi = rbf(xi)
|
||||
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
|
||||
assert_(allclose(yi, sin(xi), atol=atol), msg)
|
||||
|
||||
|
||||
def test_rbf_regularity():
|
||||
tolerances = {
|
||||
'multiquadric': 0.1,
|
||||
'inverse multiquadric': 0.15,
|
||||
'gaussian': 0.15,
|
||||
'cubic': 0.15,
|
||||
'quintic': 0.1,
|
||||
'thin-plate': 0.1,
|
||||
'linear': 0.2
|
||||
}
|
||||
for function in FUNCTIONS:
|
||||
check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
|
||||
|
||||
|
||||
def check_2drbf1d_regularity(function, atol):
|
||||
# Check that the 2-D Rbf function approximates a smooth function well away
|
||||
# from the nodes.
|
||||
x = linspace(0, 10, 9)
|
||||
y0 = sin(x)
|
||||
y1 = cos(x)
|
||||
y = np.vstack([y0, y1]).T
|
||||
rbf = Rbf(x, y, function=function, mode='N-D')
|
||||
xi = linspace(0, 10, 100)
|
||||
yi = rbf(xi)
|
||||
msg = "abs-diff: %f" % abs(yi - np.vstack([sin(xi), cos(xi)]).T).max()
|
||||
assert_(allclose(yi, np.vstack([sin(xi), cos(xi)]).T, atol=atol), msg)
|
||||
|
||||
|
||||
def test_2drbf_regularity():
|
||||
tolerances = {
|
||||
'multiquadric': 0.1,
|
||||
'inverse multiquadric': 0.15,
|
||||
'gaussian': 0.15,
|
||||
'cubic': 0.15,
|
||||
'quintic': 0.1,
|
||||
'thin-plate': 0.15,
|
||||
'linear': 0.2
|
||||
}
|
||||
for function in FUNCTIONS:
|
||||
check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
|
||||
|
||||
|
||||
def check_rbf1d_stability(function):
|
||||
# Check that the Rbf function with default epsilon is not subject
|
||||
# to overshoot. Regression for issue #4523.
|
||||
#
|
||||
# Generate some data (fixed random seed hence deterministic)
|
||||
np.random.seed(1234)
|
||||
x = np.linspace(0, 10, 50)
|
||||
z = x + 4.0 * np.random.randn(len(x))
|
||||
|
||||
rbf = Rbf(x, z, function=function)
|
||||
xi = np.linspace(0, 10, 1000)
|
||||
yi = rbf(xi)
|
||||
|
||||
# subtract the linear trend and make sure there are no spikes
|
||||
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
|
||||
|
||||
def test_rbf_stability():
|
||||
for function in FUNCTIONS:
|
||||
check_rbf1d_stability(function)
|
||||
|
||||
|
||||
def test_default_construction():
|
||||
# Check that the Rbf class can be constructed with the default
|
||||
# multiquadric basis function. Regression test for ticket #1228.
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
|
||||
|
||||
def test_function_is_callable():
|
||||
# Check that the Rbf class can be constructed with function=callable.
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
def linfunc(x):
|
||||
return x
|
||||
rbf = Rbf(x, y, function=linfunc)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
|
||||
|
||||
def test_two_arg_function_is_callable():
|
||||
# Check that the Rbf class can be constructed with a two argument
|
||||
# function=callable.
|
||||
def _func(self, r):
|
||||
return self.epsilon + r
|
||||
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y, function=_func)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
|
||||
|
||||
def test_rbf_epsilon_none():
|
||||
x = linspace(0, 10, 9)
|
||||
y = sin(x)
|
||||
Rbf(x, y, epsilon=None)
|
||||
|
||||
|
||||
def test_rbf_epsilon_none_collinear():
|
||||
# Check that collinear points in one dimension don't cause an error
|
||||
# due to epsilon = 0
|
||||
x = [1, 2, 3]
|
||||
y = [4, 4, 4]
|
||||
z = [5, 6, 7]
|
||||
rbf = Rbf(x, y, z, epsilon=None)
|
||||
assert_(rbf.epsilon > 0)
|
||||
@@ -0,0 +1,516 @@
|
||||
import pickle
|
||||
import pytest
|
||||
import numpy as np
|
||||
from numpy.linalg import LinAlgError
|
||||
from numpy.testing import assert_allclose
|
||||
from scipy.stats.qmc import Halton
|
||||
from scipy.spatial import cKDTree
|
||||
from scipy.interpolate._rbfinterp import (
|
||||
_AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
|
||||
RBFInterpolator
|
||||
)
|
||||
from scipy.interpolate import _rbfinterp_pythran
|
||||
|
||||
|
||||
def _vandermonde(x, degree):
|
||||
# Returns a matrix of monomials that span polynomials with the specified
|
||||
# degree evaluated at x.
|
||||
powers = _monomial_powers(x.shape[1], degree)
|
||||
return _rbfinterp_pythran._polynomial_matrix(x, powers)
|
||||
|
||||
|
||||
def _1d_test_function(x):
|
||||
# Test function used in Wahba's "Spline Models for Observational Data".
|
||||
# domain ~= (0, 3), range ~= (-1.0, 0.2)
|
||||
x = x[:, 0]
|
||||
y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
|
||||
return y
|
||||
|
||||
|
||||
def _2d_test_function(x):
|
||||
# Franke's test function.
|
||||
# domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
|
||||
x1, x2 = x[:, 0], x[:, 1]
|
||||
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
|
||||
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
|
||||
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
|
||||
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
|
||||
y = term1 + term2 + term3 + term4
|
||||
return y
|
||||
|
||||
|
||||
def _is_conditionally_positive_definite(kernel, m):
|
||||
# Tests whether the kernel is conditionally positive definite of order m.
|
||||
# See chapter 7 of Fasshauer's "Meshfree Approximation Methods with
|
||||
# MATLAB".
|
||||
nx = 10
|
||||
ntests = 100
|
||||
for ndim in [1, 2, 3, 4, 5]:
|
||||
# Generate sample points with a Halton sequence to avoid samples that
|
||||
# are too close to each other, which can make the matrix singular.
|
||||
seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
|
||||
for _ in range(ntests):
|
||||
x = 2*seq.random(nx) - 1
|
||||
A = _rbfinterp_pythran._kernel_matrix(x, kernel)
|
||||
P = _vandermonde(x, m - 1)
|
||||
Q, R = np.linalg.qr(P, mode='complete')
|
||||
# Q2 forms a basis spanning the space where P.T.dot(x) = 0. Project
|
||||
# A onto this space, and then see if it is positive definite using
|
||||
# the Cholesky decomposition. If not, then the kernel is not c.p.d.
|
||||
# of order m.
|
||||
Q2 = Q[:, P.shape[1]:]
|
||||
B = Q2.T.dot(A).dot(Q2)
|
||||
try:
|
||||
np.linalg.cholesky(B)
|
||||
except np.linalg.LinAlgError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# Sorting the parametrize arguments is necessary to avoid a parallelization
|
||||
# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
|
||||
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
|
||||
def test_conditionally_positive_definite(kernel):
|
||||
# Test if each kernel in _AVAILABLE is conditionally positive definite of
|
||||
# order m, where m comes from _NAME_TO_MIN_DEGREE. This is a necessary
|
||||
# condition for the smoothed RBF interpolant to be well-posed in general.
|
||||
m = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
|
||||
assert _is_conditionally_positive_definite(kernel, m)
|
||||
|
||||
|
||||
class _TestRBFInterpolator:
|
||||
@pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
|
||||
def test_scale_invariance_1d(self, kernel):
|
||||
# Verify that the functions in _SCALE_INVARIANT are insensitive to the
|
||||
# shape parameter (when smoothing == 0) in 1d.
|
||||
seq = Halton(1, scramble=False, seed=np.random.RandomState())
|
||||
x = 3*seq.random(50)
|
||||
y = _1d_test_function(x)
|
||||
xitp = 3*seq.random(50)
|
||||
yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
|
||||
yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
|
||||
assert_allclose(yitp1, yitp2, atol=1e-8)
|
||||
|
||||
@pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
|
||||
def test_scale_invariance_2d(self, kernel):
|
||||
# Verify that the functions in _SCALE_INVARIANT are insensitive to the
|
||||
# shape parameter (when smoothing == 0) in 2d.
|
||||
seq = Halton(2, scramble=False, seed=np.random.RandomState())
|
||||
x = seq.random(100)
|
||||
y = _2d_test_function(x)
|
||||
xitp = seq.random(100)
|
||||
yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
|
||||
yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
|
||||
assert_allclose(yitp1, yitp2, atol=1e-8)
|
||||
|
||||
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
|
||||
def test_extreme_domains(self, kernel):
|
||||
# Make sure the interpolant remains numerically stable for very
|
||||
# large/small domains.
|
||||
seq = Halton(2, scramble=False, seed=np.random.RandomState())
|
||||
scale = 1e50
|
||||
shift = 1e55
|
||||
|
||||
x = seq.random(100)
|
||||
y = _2d_test_function(x)
|
||||
xitp = seq.random(100)
|
||||
|
||||
if kernel in _SCALE_INVARIANT:
|
||||
yitp1 = self.build(x, y, kernel=kernel)(xitp)
|
||||
yitp2 = self.build(
|
||||
x*scale + shift, y,
|
||||
kernel=kernel
|
||||
)(xitp*scale + shift)
|
||||
else:
|
||||
yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
|
||||
yitp2 = self.build(
|
||||
x*scale + shift, y,
|
||||
epsilon=5.0/scale,
|
||||
kernel=kernel
|
||||
)(xitp*scale + shift)
|
||||
|
||||
assert_allclose(yitp1, yitp2, atol=1e-8)
|
||||
|
||||
def test_polynomial_reproduction(self):
|
||||
# If the observed data comes from a polynomial, then the interpolant
|
||||
# should be able to reproduce the polynomial exactly, provided that
|
||||
# `degree` is sufficiently high.
|
||||
rng = np.random.RandomState(0)
|
||||
seq = Halton(2, scramble=False, seed=rng)
|
||||
degree = 3
|
||||
|
||||
x = seq.random(50)
|
||||
xitp = seq.random(50)
|
||||
|
||||
P = _vandermonde(x, degree)
|
||||
Pitp = _vandermonde(xitp, degree)
|
||||
|
||||
poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
|
||||
|
||||
y = P.dot(poly_coeffs)
|
||||
yitp1 = Pitp.dot(poly_coeffs)
|
||||
yitp2 = self.build(x, y, degree=degree)(xitp)
|
||||
|
||||
assert_allclose(yitp1, yitp2, atol=1e-8)
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_chunking(self, monkeypatch):
|
||||
# If the observed data comes from a polynomial, then the interpolant
|
||||
# should be able to reproduce the polynomial exactly, provided that
|
||||
# `degree` is sufficiently high.
|
||||
rng = np.random.RandomState(0)
|
||||
seq = Halton(2, scramble=False, seed=rng)
|
||||
degree = 3
|
||||
|
||||
largeN = 1000 + 33
|
||||
# use a large number of evaluation points so that the chunked evaluation path of RBFInterpolator is exercised
|
||||
x = seq.random(50)
|
||||
xitp = seq.random(largeN)
|
||||
|
||||
P = _vandermonde(x, degree)
|
||||
Pitp = _vandermonde(xitp, degree)
|
||||
|
||||
poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
|
||||
|
||||
y = P.dot(poly_coeffs)
|
||||
yitp1 = Pitp.dot(poly_coeffs)
|
||||
interp = self.build(x, y, degree=degree)
|
||||
ce_real = interp._chunk_evaluator
|
||||
|
||||
def _chunk_evaluator(*args, **kwargs):
|
||||
kwargs.update(memory_budget=100)
|
||||
return ce_real(*args, **kwargs)
|
||||
|
||||
monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
|
||||
yitp2 = interp(xitp)
|
||||
assert_allclose(yitp1, yitp2, atol=1e-8)
|
||||
|
||||
def test_vector_data(self):
|
||||
# Make sure interpolating a vector field is the same as interpolating
|
||||
# each component separately.
|
||||
seq = Halton(2, scramble=False, seed=np.random.RandomState())
|
||||
|
||||
x = seq.random(100)
|
||||
xitp = seq.random(100)
|
||||
|
||||
y = np.array([_2d_test_function(x),
|
||||
_2d_test_function(x[:, ::-1])]).T
|
||||
|
||||
yitp1 = self.build(x, y)(xitp)
|
||||
yitp2 = self.build(x, y[:, 0])(xitp)
|
||||
yitp3 = self.build(x, y[:, 1])(xitp)
|
||||
|
||||
assert_allclose(yitp1[:, 0], yitp2)
|
||||
assert_allclose(yitp1[:, 1], yitp3)
|
||||
|
||||
def test_complex_data(self):
|
||||
# Interpolating complex input should be the same as interpolating the
|
||||
# real and imaginary components.
|
||||
seq = Halton(2, scramble=False, seed=np.random.RandomState())
|
||||
|
||||
x = seq.random(100)
|
||||
xitp = seq.random(100)
|
||||
|
||||
y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])
|
||||
|
||||
yitp1 = self.build(x, y)(xitp)
|
||||
yitp2 = self.build(x, y.real)(xitp)
|
||||
yitp3 = self.build(x, y.imag)(xitp)
|
||||
|
||||
assert_allclose(yitp1.real, yitp2)
|
||||
assert_allclose(yitp1.imag, yitp3)
|
||||
|
||||
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
|
||||
def test_interpolation_misfit_1d(self, kernel):
|
||||
# Make sure that each kernel, with its default `degree` and an
|
||||
# appropriate `epsilon`, does a good job at interpolation in 1d.
|
||||
seq = Halton(1, scramble=False, seed=np.random.RandomState())
|
||||
|
||||
x = 3*seq.random(50)
|
||||
xitp = 3*seq.random(50)
|
||||
|
||||
y = _1d_test_function(x)
|
||||
ytrue = _1d_test_function(xitp)
|
||||
yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
|
||||
|
||||
mse = np.mean((yitp - ytrue)**2)
|
||||
assert mse < 1.0e-4
|
||||
|
||||
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
|
||||
def test_interpolation_misfit_2d(self, kernel):
|
||||
# Make sure that each kernel, with its default `degree` and an
|
||||
# appropriate `epsilon`, does a good job at interpolation in 2d.
|
||||
seq = Halton(2, scramble=False, seed=np.random.RandomState())
|
||||
|
||||
x = seq.random(100)
|
||||
xitp = seq.random(100)
|
||||
|
||||
y = _2d_test_function(x)
|
||||
ytrue = _2d_test_function(xitp)
|
||||
yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
|
||||
|
||||
mse = np.mean((yitp - ytrue)**2)
|
||||
assert mse < 2.0e-4
|
||||
|
||||
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
|
||||
def test_smoothing_misfit(self, kernel):
|
||||
# Make sure we can find a smoothing parameter for each kernel that
|
||||
# removes a sufficient amount of noise.
|
||||
rng = np.random.RandomState(0)
|
||||
seq = Halton(1, scramble=False, seed=rng)
|
||||
|
||||
noise = 0.2
|
||||
rmse_tol = 0.1
|
||||
smoothing_range = 10**np.linspace(-4, 1, 20)
|
||||
|
||||
x = 3*seq.random(100)
|
||||
y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
|
||||
ytrue = _1d_test_function(x)
|
||||
rmse_within_tol = False
|
||||
for smoothing in smoothing_range:
|
||||
ysmooth = self.build(
|
||||
x, y,
|
||||
epsilon=1.0,
|
||||
smoothing=smoothing,
|
||||
kernel=kernel)(x)
|
||||
rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
|
||||
if rmse < rmse_tol:
|
||||
rmse_within_tol = True
|
||||
break
|
||||
|
||||
assert rmse_within_tol
|
||||
|
||||
def test_array_smoothing(self):
|
||||
# Test using an array for `smoothing` to give less weight to a known
|
||||
# outlier.
|
||||
rng = np.random.RandomState(0)
|
||||
seq = Halton(1, scramble=False, seed=rng)
|
||||
degree = 2
|
||||
|
||||
x = seq.random(50)
|
||||
P = _vandermonde(x, degree)
|
||||
poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
|
||||
y = P.dot(poly_coeffs)
|
||||
y_with_outlier = np.copy(y)
|
||||
y_with_outlier[10] += 1.0
|
||||
smoothing = np.zeros((50,))
|
||||
smoothing[10] = 1000.0
|
||||
yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
|
||||
# Should be able to reproduce the uncorrupted data almost exactly.
|
||||
assert_allclose(yitp, y, atol=1e-4)
|
||||
|
||||
def test_inconsistent_x_dimensions_error(self):
|
||||
# ValueError should be raised if the observation points and evaluation
|
||||
# points have a different number of dimensions.
|
||||
y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
|
||||
d = _2d_test_function(y)
|
||||
x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
|
||||
match = 'Expected the second axis of `x`'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d)(x)
|
||||
|
||||
def test_inconsistent_d_length_error(self):
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
d = np.zeros(1)
|
||||
match = 'Expected the first axis of `d`'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d)
|
||||
|
||||
def test_y_not_2d_error(self):
|
||||
y = np.linspace(0, 1, 5)
|
||||
d = np.zeros(5)
|
||||
match = '`y` must be a 2-dimensional array.'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d)
|
||||
|
||||
def test_inconsistent_smoothing_length_error(self):
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
d = np.zeros(5)
|
||||
smoothing = np.ones(1)
|
||||
match = 'Expected `smoothing` to be'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d, smoothing=smoothing)
|
||||
|
||||
def test_invalid_kernel_name_error(self):
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
d = np.zeros(5)
|
||||
match = '`kernel` must be one of'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d, kernel='test')
|
||||
|
||||
def test_epsilon_not_specified_error(self):
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
d = np.zeros(5)
|
||||
for kernel in _AVAILABLE:
|
||||
if kernel in _SCALE_INVARIANT:
|
||||
continue
|
||||
|
||||
match = '`epsilon` must be specified'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d, kernel=kernel)
|
||||
|
||||
def test_x_not_2d_error(self):
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
x = np.linspace(0, 1, 5)
|
||||
d = np.zeros(5)
|
||||
match = '`x` must be a 2-dimensional array.'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d)(x)
|
||||
|
||||
def test_not_enough_observations_error(self):
|
||||
y = np.linspace(0, 1, 1)[:, None]
|
||||
d = np.zeros(1)
|
||||
match = 'At least 2 data points are required'
|
||||
with pytest.raises(ValueError, match=match):
|
||||
self.build(y, d, kernel='thin_plate_spline')
|
||||
|
||||
def test_degree_warning(self):
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
d = np.zeros(5)
|
||||
for kernel, deg in _NAME_TO_MIN_DEGREE.items():
|
||||
# Only test kernels whose minimum degree is not 0.
|
||||
if deg >= 1:
|
||||
match = f'`degree` should not be below {deg}'
|
||||
with pytest.warns(Warning, match=match):
|
||||
self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)
|
||||
|
||||
def test_minus_one_degree(self):
|
||||
# Make sure a degree of -1 is accepted without any warning.
|
||||
y = np.linspace(0, 1, 5)[:, None]
|
||||
d = np.zeros(5)
|
||||
for kernel, _ in _NAME_TO_MIN_DEGREE.items():
|
||||
self.build(y, d, epsilon=1.0, kernel=kernel, degree=-1)
|
||||
|
||||
def test_rank_error(self):
|
||||
# An error should be raised when `kernel` is "thin_plate_spline" and
|
||||
# observations are 2-D and collinear.
|
||||
y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
|
||||
d = np.array([0.0, 0.0, 0.0])
|
||||
match = 'does not have full column rank'
|
||||
with pytest.raises(LinAlgError, match=match):
|
||||
self.build(y, d, kernel='thin_plate_spline')(y)
|
||||
|
||||
def test_single_point(self):
|
||||
# Make sure interpolation still works with only one point (in 1, 2, and
|
||||
# 3 dimensions).
|
||||
for dim in [1, 2, 3]:
|
||||
y = np.zeros((1, dim))
|
||||
d = np.ones((1,))
|
||||
f = self.build(y, d, kernel='linear')(y)
|
||||
assert_allclose(d, f)

    def test_pickleable(self):
        # Make sure we can pickle and unpickle the interpolant without any
        # changes in the behavior.
        seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)

        interp = self.build(x, y)

        yitp1 = interp(xitp)
        yitp2 = pickle.loads(pickle.dumps(interp))(xitp)

        assert_allclose(yitp1, yitp2, atol=1e-16)


class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs)
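
    # Note (added): with `neighbors` left at its default of None, a single
    # global interpolant is built from all observation points, which is what
    # the exact-equivalence check in TestRBFInterpolatorNeighborsInf relies on.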

    def test_smoothing_limit_1d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        degree = 3
        smoothing = 1e8

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)

        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
        )(xitp)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])

        assert_allclose(yitp1, yitp2, atol=1e-8)
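
    # Explanatory note (added): as `smoothing` grows, the penalty on the RBF
    # coefficients dominates and drives them toward zero, so only the appended
    # degree-`degree` polynomial survives and its coefficients solve an
    # ordinary least-squares problem -- which is what the `_vandermonde` plus
    # `lstsq` reference computation above reproduces.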

    def test_smoothing_limit_2d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        degree = 3
        smoothing = 1e8

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
        )(xitp)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])

        assert_allclose(yitp1, yitp2, atol=1e-8)


class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
    # RBFInterpolator using 20 nearest neighbors.
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs, neighbors=20)

    def test_equivalent_to_rbf_interpolator(self):
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        yitp1 = self.build(x, y)(xitp)

        yitp2 = []
        tree = cKDTree(x)
        for xi in xitp:
            _, nbr = tree.query(xi, 20)
            yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])

        assert_allclose(yitp1, yitp2, atol=1e-8)
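
    # Explanatory note (added): with `neighbors=20`, each evaluation point is
    # interpolated from a local RBF system built only from its 20 nearest
    # observation points; the cKDTree loop above reconstructs that behaviour
    # one query point at a time as an independent reference.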


class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
    # RBFInterpolator using neighbors=np.inf. This should give exactly the same
    # results as neighbors=None, but it will be slower.
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs, neighbors=np.inf)

    def test_equivalent_to_rbf_interpolator(self):
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)
        yitp1 = self.build(x, y)(xitp)
        yitp2 = RBFInterpolator(x, y)(xitp)

        assert_allclose(yitp1, yitp2, atol=1e-8)
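

# A minimal usage sketch (added for illustration; it is not part of the test
# suite and only assumes the public RBFInterpolator API exercised above).
# The __main__ guard keeps it from running under pytest; execute the module
# directly to try it.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    xobs = rng.uniform(-1.0, 1.0, (100, 2))   # observation points, shape (n, ndim)
    yobs = np.exp(-np.sum(xobs**2, axis=1))   # values at the observation points
    xitp = rng.uniform(-1.0, 1.0, (25, 2))    # points to interpolate at
    yitp = RBFInterpolator(xobs, yobs, kernel='thin_plate_spline',
                           smoothing=0.0)(xitp)
    print(yitp.shape)  # (25,)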