@@ -0,0 +1,349 @@
"""Test functions for the sparse.linalg._expm_multiply module."""
from functools import partial
from itertools import product

import numpy as np
import pytest
from numpy.testing import (assert_allclose, assert_, assert_equal,
                           suppress_warnings)
from scipy.sparse import SparseEfficiencyWarning
from scipy.sparse.linalg import aslinearoperator
import scipy.linalg
from scipy.sparse.linalg import expm as sp_expm
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
        _onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
        _expm_multiply_interval)
from scipy._lib._util import np_long


IMPRECISE = {np.single, np.csingle}
REAL_DTYPES = {np.intc, np_long, np.longlong,
               np.float32, np.float64, np.longdouble}
COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble}
# use sorted list to ensure fixed order of tests
DTYPES = sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
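# (REAL_DTYPES and COMPLEX_DTYPES are disjoint, so the symmetric difference
# above is simply their union.)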


def estimated(func):
    """If trace is estimated, it should warn.

    We warn that estimation of trace might impact performance.
    All results have to be correct nevertheless!

    """
    def wrapped(*args, **kwds):
        with pytest.warns(UserWarning,
                          match="Trace of LinearOperator not available"):
            return func(*args, **kwds)
    return wrapped
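
# Typical use, mirroring the calls in the tests below: wrap the function
# under test, e.g.
#     observed = estimated(expm_multiply)(aslinearoperator(A), B)
# so the trace-estimation UserWarning must fire while the wrapped call's
# return value passes through unchanged.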


def less_than_or_close(a, b):
    return np.allclose(a, b) or (a < b)


class TestExpmActionSimple:
    """
    These tests do not consider the case of multiple time steps in one call.
    """

    def test_theta_monotonicity(self):
        pairs = sorted(_theta.items())
        for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
            assert_(theta_a < theta_b)

    def test_p_max_default(self):
        m_max = 55
        expected_p_max = 8
        observed_p_max = _compute_p_max(m_max)
        assert_equal(observed_p_max, expected_p_max)

    def test_p_max_range(self):
        for m_max in range(1, 55+1):
            p_max = _compute_p_max(m_max)
            assert_(p_max*(p_max - 1) <= m_max + 1)
            p_too_big = p_max + 1
            assert_(p_too_big*(p_too_big - 1) > m_max + 1)

    def test_onenormest_matrix_power(self):
        np.random.seed(1234)
        n = 40
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            for p in range(4):
                if not p:
                    M = np.identity(n)
                else:
                    M = np.dot(M, A)
                estimated = _onenormest_matrix_power(A, p)
                exact = np.linalg.norm(M, 1)
                assert_(less_than_or_close(estimated, exact))
                assert_(less_than_or_close(exact, 3*estimated))

    def test_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            expected = np.dot(sp_expm(A), B)
            assert_allclose(observed, expected)
            observed = estimated(expm_multiply)(aslinearoperator(A), B)
            assert_allclose(observed, expected)
            traceA = np.trace(A)
            observed = expm_multiply(aslinearoperator(A), B, traceA=traceA)
            assert_allclose(observed, expected)

    def test_matrix_vector_multiply(self):
        np.random.seed(1234)
        n = 40
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            v = np.random.randn(n)
            observed = expm_multiply(A, v)
            expected = np.dot(sp_expm(A), v)
            assert_allclose(observed, expected)
            observed = estimated(expm_multiply)(aslinearoperator(A), v)
            assert_allclose(observed, expected)

    def test_scaled_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i, t in product(range(nsamples), [0.2, 1.0, 1.5]):
            with np.errstate(invalid='ignore'):
                A = scipy.linalg.inv(np.random.randn(n, n))
                B = np.random.randn(n, k)
                observed = _expm_multiply_simple(A, B, t=t)
                expected = np.dot(sp_expm(t*A), B)
                assert_allclose(observed, expected)
                observed = estimated(_expm_multiply_simple)(
                    aslinearoperator(A), B, t=t
                )
                assert_allclose(observed, expected)

    def test_scaled_expm_multiply_single_timepoint(self):
        np.random.seed(1234)
        t = 0.1
        n = 5
        k = 2
        A = np.random.randn(n, n)
        B = np.random.randn(n, k)
        observed = _expm_multiply_simple(A, B, t=t)
        expected = sp_expm(t*A).dot(B)
        assert_allclose(observed, expected)
        observed = estimated(_expm_multiply_simple)(
            aslinearoperator(A), B, t=t
        )
        assert_allclose(observed, expected)

    def test_sparse_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "splu converted its input to CSC format")
                sup.filter(SparseEfficiencyWarning,
                           "spsolve is more efficient when sparse b is in the"
                           " CSC matrix format")
                expected = sp_expm(A).dot(B)
            assert_allclose(observed, expected)
            observed = estimated(expm_multiply)(aslinearoperator(A), B)
            assert_allclose(observed, expected)

    def test_complex(self):
        A = np.array([
            [1j, 1j],
            [0, 1j]], dtype=complex)
        B = np.array([1j, 1j])
        observed = expm_multiply(A, B)
        expected = np.array([
            1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
            1j * np.exp(1j)], dtype=complex)
        assert_allclose(observed, expected)
        observed = estimated(expm_multiply)(aslinearoperator(A), B)
        assert_allclose(observed, expected)


class TestExpmActionInterval:

    @pytest.mark.fail_slow(5)
    def test_sparse_expm_multiply_interval(self):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        n = 40
        k = 3
        endpoint = True
        for num in (14, 13, 2):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            v = np.random.randn(n)
            for target in (B, v):
                X = expm_multiply(A, target, start=start, stop=stop,
                                  num=num, endpoint=endpoint)
                samples = np.linspace(start=start, stop=stop,
                                      num=num, endpoint=endpoint)
                with suppress_warnings() as sup:
                    sup.filter(SparseEfficiencyWarning,
                               "splu converted its input to CSC format")
                    sup.filter(SparseEfficiencyWarning,
                               "spsolve is more efficient when sparse b is in"
                               " the CSC matrix format")
                    for solution, t in zip(X, samples):
                        assert_allclose(solution, sp_expm(t*A).dot(target))

    @pytest.mark.fail_slow(5)
    def test_expm_multiply_interval_vector(self):
        np.random.seed(1234)
        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
        for num, n in product([14, 13, 2], [1, 2, 5, 20, 40]):
            A = scipy.linalg.inv(np.random.randn(n, n))
            v = np.random.randn(n)
            samples = np.linspace(num=num, **interval)
            X = expm_multiply(A, v, num=num, **interval)
            for solution, t in zip(X, samples):
                assert_allclose(solution, sp_expm(t*A).dot(v))
            # test for linear operator with unknown trace -> estimate trace
            Xguess = estimated(expm_multiply)(aslinearoperator(A), v,
                                              num=num, **interval)
            # test for linear operator with given trace
            Xgiven = expm_multiply(aslinearoperator(A), v, num=num, **interval,
                                   traceA=np.trace(A))
            # test robustness for linear operator with wrong trace
            Xwrong = expm_multiply(aslinearoperator(A), v, num=num, **interval,
                                   traceA=np.trace(A)*5)
            for sol_guess, sol_given, sol_wrong, t in zip(Xguess, Xgiven,
                                                          Xwrong, samples):
                correct = sp_expm(t*A).dot(v)
                assert_allclose(sol_guess, correct)
                assert_allclose(sol_given, correct)
                assert_allclose(sol_wrong, correct)

    @pytest.mark.fail_slow(5)
    def test_expm_multiply_interval_matrix(self):
        np.random.seed(1234)
        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
        for num, n, k in product([14, 13, 2], [1, 2, 5, 20, 40], [1, 2]):
            A = scipy.linalg.inv(np.random.randn(n, n))
            B = np.random.randn(n, k)
            samples = np.linspace(num=num, **interval)
            X = expm_multiply(A, B, num=num, **interval)
            for solution, t in zip(X, samples):
                assert_allclose(solution, sp_expm(t*A).dot(B))
            X = estimated(expm_multiply)(aslinearoperator(A), B, num=num,
                                         **interval)
            for solution, t in zip(X, samples):
                assert_allclose(solution, sp_expm(t*A).dot(B))

    def test_sparse_expm_multiply_interval_dtypes(self):
        # Test A & B int
        A = scipy.sparse.diags(np.arange(5), format='csr', dtype=int)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)), format='csr')
        assert_allclose(expm_multiply(A, B, 0, 1)[-1], Aexpm.dot(B))

        # Test A complex, B int
        A = scipy.sparse.diags(-1j*np.arange(5), format='csr', dtype=complex)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)), format='csr')
        assert_allclose(expm_multiply(A, B, 0, 1)[-1], Aexpm.dot(B))

        # Test A int, B complex
        A = scipy.sparse.diags(np.arange(5), format='csr', dtype=int)
        B = np.full(5, 1j, dtype=complex)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)), format='csr')
        assert_allclose(expm_multiply(A, B, 0, 1)[-1], Aexpm.dot(B))

    def test_expm_multiply_interval_status_0(self):
        self._help_test_specific_expm_interval_status(0)

    def test_expm_multiply_interval_status_1(self):
        self._help_test_specific_expm_interval_status(1)

    def test_expm_multiply_interval_status_2(self):
        self._help_test_specific_expm_interval_status(2)

    def _help_test_specific_expm_interval_status(self, target_status):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        num = 13
        endpoint = True
        n = 5
        k = 2
        nrepeats = 10
        nsuccesses = 0
        for num in [14, 13, 2] * nrepeats:
            A = np.random.randn(n, n)
            B = np.random.randn(n, k)
            status = _expm_multiply_interval(A, B,
                                             start=start, stop=stop, num=num,
                                             endpoint=endpoint,
                                             status_only=True)
            if status == target_status:
                X, status = _expm_multiply_interval(A, B,
                                                    start=start, stop=stop,
                                                    num=num, endpoint=endpoint,
                                                    status_only=False)
                assert_equal(X.shape, (num, n, k))
                samples = np.linspace(start=start, stop=stop,
                                      num=num, endpoint=endpoint)
                for solution, t in zip(X, samples):
                    assert_allclose(solution, sp_expm(t*A).dot(B))
                nsuccesses += 1
        if not nsuccesses:
            msg = 'failed to find a status-' + str(target_status) + ' interval'
            raise Exception(msg)


@pytest.mark.parametrize("dtype_a", DTYPES)
@pytest.mark.parametrize("dtype_b", DTYPES)
@pytest.mark.parametrize("b_is_matrix", [False, True])
def test_expm_multiply_dtype(dtype_a, dtype_b, b_is_matrix):
    """Make sure `expm_multiply` handles all numerical dtypes correctly."""
    assert_allclose_ = (partial(assert_allclose, rtol=1.2e-3, atol=1e-5)
                        if {dtype_a, dtype_b} & IMPRECISE else assert_allclose)
    rng = np.random.default_rng(1234)
    # test data
    n = 7
    b_shape = (n, 3) if b_is_matrix else (n, )
    if dtype_a in REAL_DTYPES:
        A = scipy.linalg.inv(rng.random([n, n])).astype(dtype_a)
    else:
        A = scipy.linalg.inv(
            rng.random([n, n]) + 1j*rng.random([n, n])
        ).astype(dtype_a)
    if dtype_b in REAL_DTYPES:
        B = (2*rng.random(b_shape)).astype(dtype_b)
    else:
        B = (rng.random(b_shape) + 1j*rng.random(b_shape)).astype(dtype_b)

    # single application
    sol_mat = expm_multiply(A, B)
    sol_op = estimated(expm_multiply)(aslinearoperator(A), B)
    direct_sol = np.dot(sp_expm(A), B)
    assert_allclose_(sol_mat, direct_sol)
    assert_allclose_(sol_op, direct_sol)
    sol_op = expm_multiply(aslinearoperator(A), B, traceA=np.trace(A))
    assert_allclose_(sol_op, direct_sol)

    # for time points
    interval = {'start': 0.1, 'stop': 3.2, 'num': 13, 'endpoint': True}
    samples = np.linspace(**interval)
    X_mat = expm_multiply(A, B, **interval)
    X_op = estimated(expm_multiply)(aslinearoperator(A), B, **interval)
    for sol_mat, sol_op, t in zip(X_mat, X_op, samples):
        direct_sol = sp_expm(t*A).dot(B)
        assert_allclose_(sol_mat, direct_sol)
        assert_allclose_(sol_op, direct_sol)
@@ -0,0 +1,481 @@
"""Test functions for the sparse.linalg._interface module
"""

from functools import partial
from itertools import product
import operator
from pytest import raises as assert_raises, warns
from numpy.testing import assert_, assert_equal

import numpy as np
import scipy.sparse as sparse

import scipy.sparse.linalg._interface as interface
from scipy.sparse._sputils import matrix


class TestLinearOperator:
    def setup_method(self):
        self.A = np.array([[1, 2, 3],
                           [4, 5, 6]])
        self.B = np.array([[1, 2],
                           [3, 4],
                           [5, 6]])
        self.C = np.array([[1, 2],
                           [3, 4]])

    def test_matvec(self):
        def get_matvecs(A):
            return [{
                        'shape': A.shape,
                        'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
                        'rmatvec': lambda x: np.dot(A.T.conj(),
                                                    x).reshape(A.shape[1])
                    },
                    {
                        'shape': A.shape,
                        'matvec': lambda x: np.dot(A, x),
                        'rmatvec': lambda x: np.dot(A.T.conj(), x),
                        'rmatmat': lambda x: np.dot(A.T.conj(), x),
                        'matmat': lambda x: np.dot(A, x)
                    }]

        for matvecs in get_matvecs(self.A):
            A = interface.LinearOperator(**matvecs)

            assert_(A.args == ())

            assert_equal(A.matvec(np.array([1, 2, 3])), [14, 32])
            assert_equal(A.matvec(np.array([[1], [2], [3]])), [[14], [32]])
            assert_equal(A * np.array([1, 2, 3]), [14, 32])
            assert_equal(A * np.array([[1], [2], [3]]), [[14], [32]])
            assert_equal(A.dot(np.array([1, 2, 3])), [14, 32])
            assert_equal(A.dot(np.array([[1], [2], [3]])), [[14], [32]])

            assert_equal(A.matvec(matrix([[1], [2], [3]])), [[14], [32]])
            assert_equal(A * matrix([[1], [2], [3]]), [[14], [32]])
            assert_equal(A.dot(matrix([[1], [2], [3]])), [[14], [32]])

            assert_equal((2*A)*[1, 1, 1], [12, 30])
            assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18])
            assert_equal((2*A).H.matvec([1, 1]), [10, 14, 18])
            assert_equal((2*A)*[[1], [1], [1]], [[12], [30]])
            assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]])
            assert_equal((A*2)*[1, 1, 1], [12, 30])
            assert_equal((A*2)*[[1], [1], [1]], [[12], [30]])
            assert_equal((2j*A)*[1, 1, 1], [12j, 30j])
            assert_equal((A+A)*[1, 1, 1], [12, 30])
            assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18])
            assert_equal((A+A).H.matvec([1, 1]), [10, 14, 18])
            assert_equal((A+A)*[[1], [1], [1]], [[12], [30]])
            assert_equal((A+A).matmat([[1], [1], [1]]), [[12], [30]])
            assert_equal((-A)*[1, 1, 1], [-6, -15])
            assert_equal((-A)*[[1], [1], [1]], [[-6], [-15]])
            assert_equal((A-A)*[1, 1, 1], [0, 0])
            assert_equal((A - A) * [[1], [1], [1]], [[0], [0]])

            X = np.array([[1, 2], [3, 4]])
            # A_asarray = np.array([[1, 2, 3], [4, 5, 6]])
            assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X))
            assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X))
            assert_equal((2j * A).rmatmat(X),
                         np.dot((2j * self.A).T.conj(), X))
            assert_equal((A * 2j).rmatmat(X),
                         np.dot((self.A * 2j).T.conj(), X))
            assert_equal((A + A).rmatmat(X),
                         np.dot((self.A + self.A).T, X))
            assert_equal((A + 2j * A).rmatmat(X),
                         np.dot((self.A + 2j * self.A).T.conj(), X))
            assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X))
            assert_equal((A - A).rmatmat(X),
                         np.dot((self.A - self.A).T, X))
            assert_equal((2j * A).rmatmat(2j * X),
                         np.dot((2j * self.A).T.conj(), 2j * X))

            z = A+A
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
            z = 2*A
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)

            assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
            assert_(isinstance(A.matvec(np.array([[1], [2], [3]])), np.ndarray))
            assert_(isinstance(A * np.array([1, 2, 3]), np.ndarray))
            assert_(isinstance(A * np.array([[1], [2], [3]]), np.ndarray))
            assert_(isinstance(A.dot(np.array([1, 2, 3])), np.ndarray))
            assert_(isinstance(A.dot(np.array([[1], [2], [3]])), np.ndarray))

            assert_(isinstance(A.matvec(matrix([[1], [2], [3]])), np.ndarray))
            assert_(isinstance(A * matrix([[1], [2], [3]]), np.ndarray))
            assert_(isinstance(A.dot(matrix([[1], [2], [3]])), np.ndarray))

            assert_(isinstance(2*A, interface._ScaledLinearOperator))
            assert_(isinstance(2j*A, interface._ScaledLinearOperator))
            assert_(isinstance(A+A, interface._SumLinearOperator))
            assert_(isinstance(-A, interface._ScaledLinearOperator))
            assert_(isinstance(A-A, interface._SumLinearOperator))
            assert_(isinstance(A/2, interface._ScaledLinearOperator))
            assert_(isinstance(A/2j, interface._ScaledLinearOperator))
            assert_(((A * 3) / 3).args[0] is A)  # check for simplification

            # Test that the prefactor of a _ScaledLinearOperator is not
            # mutated when the operator is multiplied by a number
            result = A @ np.array([1, 2, 3])
            B = A * 3
            C = A / 5
            assert_equal(A @ np.array([1, 2, 3]), result)

            assert_((2j*A).dtype == np.complex128)

            # Test division by non-scalar
            msg = "Can only divide a linear operator by a scalar."
            with assert_raises(ValueError, match=msg):
                A / np.array([1, 2])

            assert_raises(ValueError, A.matvec, np.array([1, 2]))
            assert_raises(ValueError, A.matvec, np.array([1, 2, 3, 4]))
            assert_raises(ValueError, A.matvec, np.array([[1], [2]]))
            assert_raises(ValueError, A.matvec, np.array([[1], [2], [3], [4]]))

            assert_raises(ValueError, lambda: A*A)
            assert_raises(ValueError, lambda: A**2)

        for matvecsA, matvecsB in product(get_matvecs(self.A),
                                          get_matvecs(self.B)):
            A = interface.LinearOperator(**matvecsA)
            B = interface.LinearOperator(**matvecsB)
            # AtimesB = np.array([[22, 28], [49, 64]])
            AtimesB = self.A.dot(self.B)
            X = np.array([[1, 2], [3, 4]])

            assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X))
            assert_equal((2j * A * B).rmatmat(X),
                         np.dot((2j * AtimesB).T.conj(), X))

            assert_equal((A*B)*[1, 1], [50, 113])
            assert_equal((A*B)*[[1], [1]], [[50], [113]])
            assert_equal((A*B).matmat([[1], [1]]), [[50], [113]])

            assert_equal((A * B).rmatvec([1, 1]), [71, 92])
            assert_equal((A * B).H.matvec([1, 1]), [71, 92])

            assert_(isinstance(A*B, interface._ProductLinearOperator))

            assert_raises(ValueError, lambda: A+B)
            assert_raises(ValueError, lambda: A**2)

            z = A*B
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)

        for matvecsC in get_matvecs(self.C):
            C = interface.LinearOperator(**matvecsC)
            X = np.array([[1, 2], [3, 4]])

            assert_equal(C.rmatmat(X), np.dot((self.C).T, X))
            assert_equal((C**2).rmatmat(X),
                         np.dot((np.dot(self.C, self.C)).T, X))

            assert_equal((C**2)*[1, 1], [17, 37])
            assert_equal((C**2).rmatvec([1, 1]), [22, 32])
            assert_equal((C**2).H.matvec([1, 1]), [22, 32])
            assert_equal((C**2).matmat([[1], [1]]), [[17], [37]])

            assert_(isinstance(C**2, interface._PowerLinearOperator))

    def test_matmul(self):
        D = {'shape': self.A.shape,
             'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
             'rmatvec': lambda x: np.dot(self.A.T.conj(),
                                         x).reshape(self.A.shape[1]),
             'rmatmat': lambda x: np.dot(self.A.T.conj(), x),
             'matmat': lambda x: np.dot(self.A, x)}
        A = interface.LinearOperator(**D)
        B = np.array([[1 + 1j, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        b = B[0]

        assert_equal(operator.matmul(A, b), A * b)
        assert_equal(operator.matmul(A, b.reshape(-1, 1)),
                     A * b.reshape(-1, 1))
        assert_equal(operator.matmul(A, B), A * B)
        assert_equal(operator.matmul(b, A.H), b * A.H)
        assert_equal(operator.matmul(b.reshape(1, -1), A.H),
                     b.reshape(1, -1) * A.H)
        assert_equal(operator.matmul(B, A.H), B * A.H)
        assert_raises(ValueError, operator.matmul, A, 2)
        assert_raises(ValueError, operator.matmul, 2, A)


class TestAsLinearOperator:
    def setup_method(self):
        self.cases = []

        def make_cases(original, dtype):
            cases = []

            cases.append((matrix(original, dtype=dtype), original))
            cases.append((np.array(original, dtype=dtype), original))
            cases.append((sparse.csr_matrix(original, dtype=dtype), original))

            # Test default implementations of _adjoint and _rmatvec, which
            # refer to each other.
            def mv(x, dtype):
                y = original.dot(x)
                if len(x.shape) == 2:
                    y = y.reshape(-1, 1)
                return y

            def rmv(x, dtype):
                return original.T.conj().dot(x)

            class BaseMatlike(interface.LinearOperator):
                args = ()

                def __init__(self, dtype):
                    self.dtype = np.dtype(dtype)
                    self.shape = original.shape

                def _matvec(self, x):
                    return mv(x, self.dtype)

            class HasRmatvec(BaseMatlike):
                args = ()

                def _rmatvec(self, x):
                    return rmv(x, self.dtype)

            class HasAdjoint(BaseMatlike):
                args = ()

                def _adjoint(self):
                    shape = self.shape[1], self.shape[0]
                    matvec = partial(rmv, dtype=self.dtype)
                    rmatvec = partial(mv, dtype=self.dtype)
                    return interface.LinearOperator(matvec=matvec,
                                                    rmatvec=rmatvec,
                                                    dtype=self.dtype,
                                                    shape=shape)

            class HasRmatmat(HasRmatvec):
                def _matmat(self, x):
                    return original.dot(x)

                def _rmatmat(self, x):
                    return original.T.conj().dot(x)

            cases.append((HasRmatvec(dtype), original))
            cases.append((HasAdjoint(dtype), original))
            cases.append((HasRmatmat(dtype), original))
            return cases

        original = np.array([[1, 2, 3], [4, 5, 6]])
        self.cases += make_cases(original, np.int32)
        self.cases += make_cases(original, np.float32)
        self.cases += make_cases(original, np.float64)
        self.cases += [(interface.aslinearoperator(M).T, A.T)
                       for M, A in make_cases(original.T, np.float64)]
        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                       for M, A in make_cases(original.T, np.float64)]

        original = np.array([[1, 2j, 3j], [4j, 5j, 6]])
        self.cases += make_cases(original, np.complex128)
        self.cases += [(interface.aslinearoperator(M).T, A.T)
                       for M, A in make_cases(original.T, np.complex128)]
        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                       for M, A in make_cases(original.T, np.complex128)]

    def test_basic(self):

        for M, A_array in self.cases:
            A = interface.aslinearoperator(M)
            M, N = A.shape

            xs = [np.array([1, 2, 3]),
                  np.array([[1], [2], [3]])]
            ys = [np.array([1, 2]), np.array([[1], [2]])]

            if A.dtype == np.complex128:
                xs += [np.array([1, 2j, 3j]),
                       np.array([[1], [2j], [3j]])]
                ys += [np.array([1, 2j]), np.array([[1], [2j]])]

            x2 = np.array([[1, 4], [2, 5], [3, 6]])

            for x in xs:
                assert_equal(A.matvec(x), A_array.dot(x))
                assert_equal(A * x, A_array.dot(x))

            assert_equal(A.matmat(x2), A_array.dot(x2))
            assert_equal(A * x2, A_array.dot(x2))

            for y in ys:
                assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))
                assert_equal(A.T.matvec(y), A_array.T.dot(y))
                assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))

            for y in ys:
                if y.ndim < 2:
                    continue
                assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))
                assert_equal(A.T.matmat(y), A_array.T.dot(y))
                assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))

            if hasattr(M, 'dtype'):
                assert_equal(A.dtype, M.dtype)

            assert_(hasattr(A, 'args'))

    def test_dot(self):

        for M, A_array in self.cases:
            A = interface.aslinearoperator(M)
            M, N = A.shape

            x0 = np.array([1, 2, 3])
            x1 = np.array([[1], [2], [3]])
            x2 = np.array([[1, 4], [2, 5], [3, 6]])

            assert_equal(A.dot(x0), A_array.dot(x0))
            assert_equal(A.dot(x1), A_array.dot(x1))
            assert_equal(A.dot(x2), A_array.dot(x2))


def test_repr():
    A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
    repr_A = repr(A)
    assert_('unspecified dtype' not in repr_A, repr_A)


def test_identity():
    ident = interface.IdentityOperator((3, 3))
    assert_equal(ident * [1, 2, 3], [1, 2, 3])
    assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))

    assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])


def test_attributes():
    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))

    def always_four_ones(x):
        x = np.asarray(x)
        assert_(x.shape == (3,) or x.shape == (3, 1))
        return np.ones(4)

    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)

    for op in [A, B, A * B, A.H, A + A, B + B, A**4]:
        assert_(hasattr(op, "dtype"))
        assert_(hasattr(op, "shape"))
        assert_(hasattr(op, "_matvec"))


def matvec(x):
    """ Needed for test_pickle as local functions are not pickleable """
    return np.zeros(3)


def test_pickle():
    import pickle

    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
        A = interface.LinearOperator((3, 3), matvec)
        s = pickle.dumps(A, protocol=protocol)
        B = pickle.loads(s)

        for k in A.__dict__:
            assert_equal(getattr(A, k), getattr(B, k))


def test_inheritance():
    class Empty(interface.LinearOperator):
        pass

    with warns(RuntimeWarning, match="should implement at least"):
        assert_raises(TypeError, Empty)

    class Identity(interface.LinearOperator):
        def __init__(self, n):
            super().__init__(dtype=None, shape=(n, n))

        def _matvec(self, x):
            return x

    id3 = Identity(3)
    assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
    assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])

    class MatmatOnly(interface.LinearOperator):
        def __init__(self, A):
            super().__init__(A.dtype, A.shape)
            self.A = A

        def _matmat(self, x):
            return self.A.dot(x)

    mm = MatmatOnly(np.random.randn(5, 3))
    assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))
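    # (mm defines only _matmat, so mm.matvec exercises the base-class
    # fallback: the vector is reshaped to a column, passed through _matmat,
    # and flattened back, giving shape (5,).)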


def test_dtypes_of_operator_sum():
    # gh-6078

    mat_complex = np.random.rand(2, 2) + 1j * np.random.rand(2, 2)
    mat_real = np.random.rand(2, 2)

    complex_operator = interface.aslinearoperator(mat_complex)
    real_operator = interface.aslinearoperator(mat_real)

    sum_complex = complex_operator + complex_operator
    sum_real = real_operator + real_operator

    assert_equal(sum_real.dtype, np.float64)
    assert_equal(sum_complex.dtype, np.complex128)


def test_no_double_init():
    call_count = [0]

    def matvec(v):
        call_count[0] += 1
        return v

    # It should call matvec exactly once (in order to determine the
    # operator dtype)
    interface.LinearOperator((2, 2), matvec=matvec)
    assert_equal(call_count[0], 1)


def test_adjoint_conjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)

    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.H.dot(v), Y.T.conj().dot(v))


def test_ndim():
    X = np.array([[1]])
    A = interface.aslinearoperator(X)
    assert_equal(A.ndim, 2)


def test_transpose_noconjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)

    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.T.dot(v), Y.T.dot(v))


def test_sparse_matmat_exception():
    A = interface.LinearOperator((2, 2), matvec=lambda x: x)
    B = sparse.identity(2)
    msg = "Unable to multiply a LinearOperator with a sparse matrix."
    with assert_raises(TypeError, match=msg):
        A @ B
    with assert_raises(TypeError, match=msg):
        B @ A
    with assert_raises(ValueError):
        A @ np.identity(4)
    with assert_raises(ValueError):
        np.identity(4) @ A
@@ -0,0 +1,592 @@
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for scipy.linalg._matfuncs module

"""
import math

import numpy as np
from numpy import array, eye, exp, random
from numpy.testing import (
    assert_allclose, assert_, assert_array_almost_equal, assert_equal,
    assert_array_almost_equal_nulp, suppress_warnings)

from scipy.sparse import csc_matrix, csc_array, SparseEfficiencyWarning
from scipy.sparse._construct import eye as speye
from scipy.sparse.linalg._matfuncs import (expm, _expm,
        ProductOperator, MatrixPowerOperator,
        _onenorm_matrix_power_nnm, matrix_power)
from scipy.sparse._sputils import matrix
from scipy.linalg import logm
from scipy.special import factorial, binom
import scipy.sparse
import scipy.sparse.linalg


def _burkardt_13_power(n, p):
    """
    A helper function for testing matrix functions.

    Parameters
    ----------
    n : integer greater than 1
        Order of the square matrix to be returned.
    p : non-negative integer
        Power of the matrix.

    Returns
    -------
    out : ndarray representing a square matrix
        A Forsythe matrix of order n, raised to the power p.

    """
    # Input validation.
    if n != int(n) or n < 2:
        raise ValueError('n must be an integer greater than 1')
    n = int(n)
    if p != int(p) or p < 0:
        raise ValueError('p must be a non-negative integer')
    p = int(p)

    # Construct the matrix explicitly.
    a, b = divmod(p, n)
    large = np.power(10.0, -n*a)
    small = large * np.power(10.0, -n)
    return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)
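
# A concrete instance (the same matrix is checked in test_burkardt_13 below):
# _burkardt_13_power(4, 1) is the order-4 Forsythe matrix
#     [[0,    1, 0, 0],
#      [0,    0, 1, 0],
#      [0,    0, 0, 1],
#      [1e-4, 0, 0, 0]]
# and higher powers cycle the two diagonals, with the maximum entry scaled
# down to 10**(-floor(p/n)*n).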


def test_onenorm_matrix_power_nnm():
    np.random.seed(1234)
    for n in range(1, 5):
        for p in range(5):
            M = np.random.random((n, n))
            Mp = np.linalg.matrix_power(M, p)
            observed = _onenorm_matrix_power_nnm(M, p)
            expected = np.linalg.norm(Mp, 1)
            assert_allclose(observed, expected)


def test_matrix_power():
    np.random.seed(1234)
    row, col = np.random.randint(0, 4, size=(2, 6))
    data = np.random.random(size=(6,))
    Amat = csc_matrix((data, (row, col)), shape=(4, 4))
    A = csc_array((data, (row, col)), shape=(4, 4))
    Adense = A.toarray()
    for power in (2, 5, 6):
        Apow = matrix_power(A, power).toarray()
        Amat_pow = (Amat**power).toarray()
        Adense_pow = np.linalg.matrix_power(Adense, power)
        assert_allclose(Apow, Adense_pow)
        assert_allclose(Apow, Amat_pow)


class TestExpM:
    def test_zero_ndarray(self):
        a = array([[0., 0], [0, 0]])
        assert_array_almost_equal(expm(a), [[1, 0], [0, 1]])

    def test_zero_sparse(self):
        a = csc_matrix([[0., 0], [0, 0]])
        assert_array_almost_equal(expm(a).toarray(), [[1, 0], [0, 1]])

    def test_zero_matrix(self):
        a = matrix([[0., 0], [0, 0]])
        assert_array_almost_equal(expm(a), [[1, 0], [0, 1]])

    def test_misc_types(self):
        A = expm(np.array([[1]]))
        assert_allclose(expm(((1,),)), A)
        assert_allclose(expm([[1]]), A)
        assert_allclose(expm(matrix([[1]])), A)
        assert_allclose(expm(np.array([[1]])), A)
        assert_allclose(expm(csc_matrix([[1]])).toarray(), A)
        B = expm(np.array([[1j]]))
        assert_allclose(expm(((1j,),)), B)
        assert_allclose(expm([[1j]]), B)
        assert_allclose(expm(matrix([[1j]])), B)
        assert_allclose(expm(csc_matrix([[1j]])).toarray(), B)

    def test_bidiagonal_sparse(self):
        A = csc_matrix([
            [1, 3, 0],
            [0, 1, 5],
            [0, 0, 2]], dtype=float)
        e1 = math.exp(1)
        e2 = math.exp(2)
        expected = np.array([
            [e1, 3*e1, 15*(e2 - 2*e1)],
            [0, e1, 5*(e2 - e1)],
            [0, 0, e2]], dtype=float)
        observed = expm(A).toarray()
        assert_array_almost_equal(observed, expected)

    def test_padecases_dtype_float(self):
        for dtype in [np.float32, np.float64]:
            for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
                A = scale * eye(3, dtype=dtype)
                observed = expm(A)
                expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
                assert_array_almost_equal_nulp(observed, expected, nulp=100)

    def test_padecases_dtype_complex(self):
        for dtype in [np.complex64, np.complex128]:
            for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
                A = scale * eye(3, dtype=dtype)
                observed = expm(A)
                expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
                assert_array_almost_equal_nulp(observed, expected, nulp=100)

    def test_padecases_dtype_sparse_float(self):
        # float32 and complex64 lead to errors in spsolve/UMFpack
        dtype = np.float64
        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
            a = scale * speye(3, 3, dtype=dtype, format='csc')
            e = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure")
                exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
                inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
            assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
            assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)

    def test_padecases_dtype_sparse_complex(self):
        # float32 and complex64 lead to errors in spsolve/UMFpack
        dtype = np.complex128
        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
            a = scale * speye(3, 3, dtype=dtype, format='csc')
            e = exp(scale) * eye(3, dtype=dtype)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure")
                assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)

    def test_logm_consistency(self):
        random.seed(1234)
        for dtype in [np.float64, np.complex128]:
            for n in range(1, 10):
                for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
                    # make logm(A) be of a given scale
                    A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
                    if np.iscomplexobj(A):
                        A = A + 1j * random.rand(n, n) * scale
                    assert_array_almost_equal(expm(logm(A)), A)

    def test_integer_matrix(self):
        Q = np.array([
            [-3, 1, 1, 1],
            [1, -3, 1, 1],
            [1, 1, -3, 1],
            [1, 1, 1, -3]])
        assert_allclose(expm(Q), expm(1.0 * Q))

    def test_integer_matrix_2(self):
        # Check for integer overflows
        Q = np.array([[-500, 500, 0, 0],
                      [0, -550, 360, 190],
                      [0, 630, -630, 0],
                      [0, 0, 0, 0]], dtype=np.int16)
        assert_allclose(expm(Q), expm(1.0 * Q))

        Q = csc_matrix(Q)
        assert_allclose(expm(Q).toarray(), expm(1.0 * Q).toarray())

    def test_triangularity_perturbation(self):
        # Experiment (1) of
        # Awad H. Al-Mohy and Nicholas J. Higham (2012)
        # Improved Inverse Scaling and Squaring Algorithms
        # for the Matrix Logarithm.
        A = np.array([
            [3.2346e-1, 3e4, 3e4, 3e4],
            [0, 3.0089e-1, 3e4, 3e4],
            [0, 0, 3.221e-1, 3e4],
            [0, 0, 0, 3.0744e-1]],
            dtype=float)
        A_logm = np.array([
            [-1.12867982029050462e+00, 9.61418377142025565e+04,
             -4.52485573953179264e+09, 2.92496941103871812e+14],
            [0.00000000000000000e+00, -1.20101052953082288e+00,
             9.63469687211303099e+04, -4.68104828911105442e+09],
            [0.00000000000000000e+00, 0.00000000000000000e+00,
             -1.13289322264498393e+00, 9.53249183094775653e+04],
            [0.00000000000000000e+00, 0.00000000000000000e+00,
             0.00000000000000000e+00, -1.17947533272554850e+00]],
            dtype=float)
        assert_allclose(expm(A_logm), A, rtol=1e-4)

        # Perturb the upper triangular matrix by tiny amounts,
        # so that it becomes technically not upper triangular.
        random.seed(1234)
        tiny = 1e-17
        A_logm_perturbed = A_logm.copy()
        A_logm_perturbed[1, 0] = tiny
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "Ill-conditioned.*")
            A_expm_logm_perturbed = expm(A_logm_perturbed)
        rtol = 1e-4
        atol = 100 * tiny
        assert_(not np.allclose(A_expm_logm_perturbed, A,
                                rtol=rtol, atol=atol))

    def test_burkardt_1(self):
        # This matrix is diagonal.
        # The calculation of the matrix exponential is simple.
        #
        # This is the first of a series of matrix exponential tests
        # collected by John Burkardt from the following sources.
        #
        # Alan Laub,
        # Review of "Linear System Theory" by Joao Hespanha,
        # SIAM Review,
        # Volume 52, Number 4, December 2010, pages 779--781.
        #
        # Cleve Moler and Charles Van Loan,
        # Nineteen Dubious Ways to Compute the Exponential of a Matrix,
        # Twenty-Five Years Later,
        # SIAM Review,
        # Volume 45, Number 1, March 2003, pages 3--49.
        #
        # Cleve Moler,
        # Cleve's Corner: A Balancing Act for the Matrix Exponential,
        # 23 July 2012.
        #
        # Robert Ward,
        # Numerical computation of the matrix exponential
        # with accuracy estimate,
        # SIAM Journal on Numerical Analysis,
        # Volume 14, Number 4, September 1977, pages 600--610.
        exp1 = np.exp(1)
        exp2 = np.exp(2)
        A = np.array([
            [1, 0],
            [0, 2],
            ], dtype=float)
        desired = np.array([
            [exp1, 0],
            [0, exp2],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_2(self):
        # This matrix is symmetric.
        # The calculation of the matrix exponential is straightforward.
        A = np.array([
            [1, 3],
            [3, 2],
            ], dtype=float)
        desired = np.array([
            [39.322809708033859, 46.166301438885753],
            [46.166301438885768, 54.711576854329110],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_3(self):
        # This example is due to Laub.
        # This matrix is ill-suited for the Taylor series approach.
        # As powers of A are computed, the entries blow up too quickly.
        exp1 = np.exp(1)
        exp39 = np.exp(39)
        A = np.array([
            [0, 1],
            [-39, -40],
            ], dtype=float)
        desired = np.array([
            [
                39/(38*exp1) - 1/(38*exp39),
                -np.expm1(-38) / (38*exp1)],
            [
                39*np.expm1(-38) / (38*exp1),
                -1/(38*exp1) + 39/(38*exp39)],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_4(self):
        # This example is due to Moler and Van Loan.
        # The example will cause problems for the series summation approach,
        # as well as for diagonal Pade approximations.
        A = np.array([
            [-49, 24],
            [-64, 31],
            ], dtype=float)
        U = np.array([[3, 1], [4, 2]], dtype=float)
        V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
        w = np.array([-17, -1], dtype=float)
        desired = np.dot(U * np.exp(w), V)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_5(self):
        # This example is due to Moler and Van Loan.
        # This matrix is strictly upper triangular.
        # All powers of A are zero beyond some (low) limit.
        # This example will cause problems for Pade approximations.
        A = np.array([
            [0, 6, 0, 0],
            [0, 0, 6, 0],
            [0, 0, 0, 6],
            [0, 0, 0, 0],
            ], dtype=float)
        desired = np.array([
            [1, 6, 18, 36],
            [0, 1, 6, 18],
            [0, 0, 1, 6],
            [0, 0, 0, 1],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_6(self):
        # This example is due to Moler and Van Loan.
        # This matrix does not have a complete set of eigenvectors.
        # That means the eigenvector approach will fail.
        exp1 = np.exp(1)
        A = np.array([
            [1, 1],
            [0, 1],
            ], dtype=float)
        desired = np.array([
            [exp1, exp1],
            [0, exp1],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_7(self):
        # This example is due to Moler and Van Loan.
        # This matrix is very close to example 5.
        # Mathematically, it has a complete set of eigenvectors.
        # Numerically, however, the calculation will be suspect.
        exp1 = np.exp(1)
        eps = np.spacing(1)
        A = np.array([
            [1 + eps, 1],
            [0, 1 - eps],
            ], dtype=float)
        desired = np.array([
            [exp1, exp1],
            [0, exp1],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_8(self):
        # This matrix was an example in Wikipedia.
        exp4 = np.exp(4)
        exp16 = np.exp(16)
        A = np.array([
            [21, 17, 6],
            [-5, -1, -6],
            [4, 4, 16],
            ], dtype=float)
        desired = np.array([
            [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
            [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
            [16*exp16, 16*exp16, 4*exp16],
            ], dtype=float) * 0.25
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_9(self):
        # This matrix is due to the NAG Library.
        # It is an example for function F01ECF.
        A = np.array([
            [1, 2, 2, 2],
            [3, 1, 1, 2],
            [3, 2, 1, 2],
            [3, 3, 3, 1],
            ], dtype=float)
        desired = np.array([
            [740.7038, 610.8500, 542.2743, 549.1753],
            [731.2510, 603.5524, 535.0884, 542.2743],
            [823.7630, 679.4257, 603.5524, 610.8500],
            [998.4355, 823.7630, 731.2510, 740.7038],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_10(self):
        # This is Ward's example #1.
        # It is defective and nonderogatory.
        A = np.array([
            [4, 2, 0],
            [1, 4, 1],
            [1, 1, 4],
            ], dtype=float)
        assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
        desired = np.array([
            [147.8666224463699, 183.7651386463682, 71.79703239999647],
            [127.7810855231823, 183.7651386463682, 91.88256932318415],
            [127.7810855231824, 163.6796017231806, 111.9681062463718],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_11(self):
        # This is Ward's example #2.
        # It is a symmetric matrix.
        A = np.array([
            [29.87942128909879, 0.7815750847907159, -2.289519314033932],
            [0.7815750847907159, 25.72656945571064, 8.680737820540137],
            [-2.289519314033932, 8.680737820540137, 34.39400925519054],
            ], dtype=float)
        assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
        desired = np.array([
            [
                5.496313853692378E+15,
                -1.823188097200898E+16,
                -3.047577080858001E+16],
            [
                -1.823188097200899E+16,
                6.060522870222108E+16,
                1.012918429302482E+17],
            [
                -3.047577080858001E+16,
                1.012918429302482E+17,
                1.692944112408493E+17],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_12(self):
        # This is Ward's example #3.
        # Ward's algorithm has difficulty estimating the accuracy
        # of its results.
        A = np.array([
            [-131, 19, 18],
            [-390, 56, 54],
            [-387, 57, 52],
            ], dtype=float)
        assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
        desired = np.array([
            [-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
            [-5.632570799891469, 1.471517758499875, 0.4060058435250609],
            [-4.934938326088363, 1.103638317328798, 0.5413411267617766],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_13(self):
        # This is Ward's example #4.
        # This is a version of the Forsythe matrix.
        # The eigenvector problem is badly conditioned.
        # Ward's algorithm has difficulty estimating the accuracy
        # of its results for this problem.
        #
        # Check the construction of one instance of this family of matrices.
        A4_actual = _burkardt_13_power(4, 1)
        A4_desired = [[0, 1, 0, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1],
                      [1e-4, 0, 0, 0]]
        assert_allclose(A4_actual, A4_desired)
        # Check the expm for a few instances.
        for n in (2, 3, 4, 10):
            # Approximate expm using Taylor series.
            # This works well for this matrix family
            # because each matrix in the summation,
            # even before dividing by the factorial,
            # is entrywise positive with max entry 10**(-floor(p/n)*n).
            k = max(1, int(np.ceil(16/n)))
            desired = np.zeros((n, n), dtype=float)
            for p in range(n*k):
                Ap = _burkardt_13_power(n, p)
                assert_equal(np.min(Ap), 0)
                assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
                desired += Ap / factorial(p)
            actual = expm(_burkardt_13_power(n, 1))
            assert_allclose(actual, desired)

    def test_burkardt_14(self):
        # This is Moler's example.
        # This badly scaled matrix caused problems for MATLAB's expm().
        A = np.array([
            [0, 1e-8, 0],
            [-(2e10 + 4e8/6.), -3, 2e10],
            [200./3., 0, -200./3.],
            ], dtype=float)
        desired = np.array([
            [0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
            [-5743067.77947947, -0.0152830038686819, -4526542.71278401],
            [0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_pascal(self):
        # Test pascal triangle.
        # Nilpotent exponential, used to trigger a failure (gh-8029)

        for scale in [1.0, 1e-3, 1e-6]:
            for n in range(0, 80, 3):
                sc = scale ** np.arange(n, -1, -1)
                if np.any(sc < 1e-300):
                    break

                A = np.diag(np.arange(1, n + 1), -1) * scale
                B = expm(A)

                got = B
                expected = (binom(np.arange(n + 1)[:, None],
                                  np.arange(n + 1)[None, :])
                            * sc[None, :] / sc[:, None])
                atol = 1e-13 * abs(expected).max()
                assert_allclose(got, expected, atol=atol)

    def test_matrix_input(self):
        # Large np.matrix inputs should work, gh-5546
        A = np.zeros((200, 200))
        A[-1, 0] = 1
        B0 = expm(A)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "the matrix subclass.*")
            sup.filter(PendingDeprecationWarning, "the matrix subclass.*")
            B = expm(np.matrix(A))
        assert_allclose(B, B0)

    def test_exp_sinch_overflow(self):
        # Check overflow in intermediate steps is fixed (gh-11839)
        L = np.array([[1.0, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0, -0.5, -0.5, 0.0, 0.0],
                      [0.0, 0.0, 1.0, 0.0, 0.0, -0.5, -0.5],
                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])

        E0 = expm(-L)
        E1 = expm(-2**11 * L)
        E2 = E0
        for j in range(11):
            E2 = E2 @ E2

        assert_allclose(E1, E2)


class TestOperators:

    def test_product_operator(self):
        random.seed(1234)
        n = 5
        k = 2
        nsamples = 10
        for i in range(nsamples):
            A = np.random.randn(n, n)
            B = np.random.randn(n, n)
            C = np.random.randn(n, n)
            D = np.random.randn(n, k)
            op = ProductOperator(A, B, C)
            assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))
            assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))

    def test_matrix_power_operator(self):
        random.seed(1234)
        n = 5
        k = 2
        p = 3
        nsamples = 10
        for i in range(nsamples):
            A = np.random.randn(n, n)
            B = np.random.randn(n, k)
            op = MatrixPowerOperator(A, p)
            assert_allclose(op.matmat(B), np.linalg.matrix_power(A, p).dot(B))
            assert_allclose(op.T.matmat(B),
                            np.linalg.matrix_power(A, p).T.dot(B))
@@ -0,0 +1,141 @@
"""Test functions for the sparse.linalg.norm module
"""

import pytest
import numpy as np
from numpy.linalg import norm as npnorm
from numpy.testing import assert_allclose, assert_equal
from pytest import raises as assert_raises

import scipy.sparse
from scipy.sparse.linalg import norm as spnorm


# https://github.com/scipy/scipy/issues/16031
def test_sparray_norm():
    row = np.array([0, 0, 1, 1])
    col = np.array([0, 1, 2, 3])
    data = np.array([4, 5, 7, 9])
    test_arr = scipy.sparse.coo_array((data, (row, col)), shape=(2, 4))
    test_mat = scipy.sparse.coo_matrix((data, (row, col)), shape=(2, 4))
    assert_equal(spnorm(test_arr, ord=1, axis=0), np.array([4, 5, 7, 9]))
    assert_equal(spnorm(test_mat, ord=1, axis=0), np.array([4, 5, 7, 9]))
    assert_equal(spnorm(test_arr, ord=1, axis=1), np.array([9, 16]))
    assert_equal(spnorm(test_mat, ord=1, axis=1), np.array([9, 16]))
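    # (With ord=1, axis=0 takes the 1-norm of each column and axis=1 of each
    # row; each column above holds a single entry, so the column norms are
    # exactly the data values and the row norms are the row-wise sums.)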


class TestNorm:
    def setup_method(self):
        a = np.arange(9) - 4
        b = a.reshape((3, 3))
        self.b = scipy.sparse.csr_matrix(b)

    def test_matrix_norm(self):

        # Frobenius norm is the default
        assert_allclose(spnorm(self.b), 7.745966692414834)
        assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)

        assert_allclose(spnorm(self.b, np.inf), 9)
        assert_allclose(spnorm(self.b, -np.inf), 2)
        assert_allclose(spnorm(self.b, 1), 7)
        assert_allclose(spnorm(self.b, -1), 6)
        # Only floating or complex floating dtype supported by svds.
        with pytest.warns(UserWarning, match="The problem size"):
            assert_allclose(spnorm(self.b.astype(np.float64), 2),
                            7.348469228349534)

        # _multi_svd_norm is not implemented for sparse matrix
        assert_raises(NotImplementedError, spnorm, self.b, -2)

    def test_matrix_norm_axis(self):
        for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
            assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
            assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
            assert_allclose(spnorm(m, np.inf, axis=axis), 9)
            assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
            assert_allclose(spnorm(m, 1, axis=axis), 7)
            assert_allclose(spnorm(m, -1, axis=axis), 6)

    def test_vector_norm(self):
        v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
        for m, a in (self.b, 0), (self.b.T, 1):
            for axis in a, (a, ), a-2, (a-2, ):
                assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
                assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
                assert_allclose(spnorm(m, axis=axis), v)
                assert_allclose(spnorm(m, ord=2, axis=axis), v)
                assert_allclose(spnorm(m, ord=None, axis=axis), v)

    def test_norm_exceptions(self):
        m = self.b
        assert_raises(TypeError, spnorm, m, None, 1.5)
        assert_raises(TypeError, spnorm, m, None, [2])
        assert_raises(ValueError, spnorm, m, None, ())
        assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
        assert_raises(ValueError, spnorm, m, None, (0, 0))
        assert_raises(ValueError, spnorm, m, None, (0, 2))
        assert_raises(ValueError, spnorm, m, None, (-3, 0))
        assert_raises(ValueError, spnorm, m, None, 2)
        assert_raises(ValueError, spnorm, m, None, -3)
        assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
        assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))


class TestVsNumpyNorm:
    _sparse_types = (
        scipy.sparse.bsr_matrix,
        scipy.sparse.coo_matrix,
        scipy.sparse.csc_matrix,
        scipy.sparse.csr_matrix,
        scipy.sparse.dia_matrix,
        scipy.sparse.dok_matrix,
        scipy.sparse.lil_matrix,
    )
    _test_matrices = (
        (np.arange(9) - 4).reshape((3, 3)),
        [
            [1, 2, 3],
            [-1, 1, 4]],
        [
            [1, 0, 3],
            [-1, 1, 4j]],
    )

    def test_sparse_matrix_norms(self):
        for sparse_type in self._sparse_types:
            for M in self._test_matrices:
                S = sparse_type(M)
                assert_allclose(spnorm(S), npnorm(M))
                assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
                assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
                assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
                assert_allclose(spnorm(S, 1), npnorm(M, 1))
                assert_allclose(spnorm(S, -1), npnorm(M, -1))

    def test_sparse_matrix_norms_with_axis(self):
        for sparse_type in self._sparse_types:
            for M in self._test_matrices:
                S = sparse_type(M)
                for axis in None, (0, 1), (1, 0):
                    assert_allclose(spnorm(S, axis=axis),
                                    npnorm(M, axis=axis))
                    for ord in 'fro', np.inf, -np.inf, 1, -1:
                        assert_allclose(spnorm(S, ord, axis=axis),
                                        npnorm(M, ord, axis=axis))
                # Some numpy matrix norms are allergic to negative axes.
                for axis in (-2, -1), (-1, -2), (1, -2):
                    assert_allclose(spnorm(S, axis=axis),
                                    npnorm(M, axis=axis))
                    assert_allclose(spnorm(S, 'f', axis=axis),
                                    npnorm(M, 'f', axis=axis))
                    assert_allclose(spnorm(S, 'fro', axis=axis),
                                    npnorm(M, 'fro', axis=axis))

    def test_sparse_vector_norms(self):
        for sparse_type in self._sparse_types:
            for M in self._test_matrices:
                S = sparse_type(M)
                for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )):
                    assert_allclose(spnorm(S, axis=axis),
                                    npnorm(M, axis=axis))
                    for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
                        assert_allclose(spnorm(S, ord, axis=axis),
                                        npnorm(M, ord, axis=axis))
@ -0,0 +1,252 @@
"""Test functions for the sparse.linalg._onenormest module
"""

import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
import pytest
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2


class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
    """
    This is purely for onenormest testing.
    """

    def __init__(self, A, B):
        if A.ndim != 2 or B.ndim != 2:
            raise ValueError('expected ndarrays representing matrices')
        if A.shape[1] != B.shape[0]:
            raise ValueError('incompatible shapes')
        self.A = A
        self.B = B
        self.ndim = 2
        self.shape = (A.shape[0], B.shape[1])

    def _matvec(self, x):
        return np.dot(self.A, np.dot(self.B, x))

    def _rmatvec(self, x):
        return np.dot(np.dot(x, self.A), self.B)

    def _matmat(self, X):
        return np.dot(self.A, np.dot(self.B, X))

    @property
    def T(self):
        return MatrixProductOperator(self.B.T, self.A.T)
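
# A minimal usage sketch (using only names defined above): for A of shape
# (n, k) and B of shape (k, n) with small k, each matvec costs O(n*k) work
# instead of the O(n**2) needed to form A @ B densely, e.g.
#
#     op = MatrixProductOperator(np.random.randn(1000, 3),
#                                np.random.randn(3, 1000))
#     est = scipy.sparse.linalg.onenormest(op)
#
# estimates ||A @ B||_1 without ever materializing the product.
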
class TestOnenormest:

    @pytest.mark.xslow
    def test_onenormest_table_3_t_2(self):
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 2
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        assert_(0.99 < np.mean(underestimation_ratio) < 1.0)

        # check the max and mean required column resamples
        assert_equal(np.max(nresample_list), 2)
        assert_(0.05 < np.mean(nresample_list) < 0.2)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.9 < proportion_exact < 0.95)

        # check the average number of matrix*vector multiplications
        assert_(3.5 < np.mean(nmult_list) < 4.5)

    @pytest.mark.xslow
    def test_onenormest_table_4_t_7(self):
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 7
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            A = np.random.randint(-1, 2, size=(n, n))
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        assert_(0.90 < np.mean(underestimation_ratio) < 0.99)

        # check the required column resamples
        assert_equal(np.max(nresample_list), 0)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.15 < proportion_exact < 0.25)

        # check the average number of matrix*vector multiplications
        assert_(3.5 < np.mean(nmult_list) < 4.5)

    def test_onenormest_table_5_t_1(self):
        # "note that there is no randomness and hence only one estimate for t=1"
        t = 1
        n = 100
        itmax = 5
        alpha = 1 - 1e-6
        A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
        first_col = np.array([1] + [0]*(n-1))
        first_row = np.array([(-alpha)**i for i in range(n)])
        B = -scipy.linalg.toeplitz(first_col, first_row)
        # The inverse of the unit upper bidiagonal matrix above is upper
        # triangular Toeplitz with entries (-alpha)**(j - i), so A == B.
        assert_allclose(A, B)
        est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
        exact_value = scipy.linalg.norm(B, 1)
        underest_ratio = est / exact_value
        assert_allclose(underest_ratio, 0.05, rtol=1e-4)
        assert_equal(nmults, 11)
        assert_equal(nresamples, 0)
        # check the non-underscored version of onenormest
        est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
        assert_allclose(est, est_plain)

    @pytest.mark.xslow
    def test_onenormest_table_6_t_1(self):
        # TODO: this test seems to give estimates that match the table,
        # even though no attempt has been made to deal with
        # complex numbers in the one-norm estimation.
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 1
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
            A = scipy.linalg.inv(A_inv)
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        underestimation_ratio_mean = np.mean(underestimation_ratio)
        assert_(0.90 < underestimation_ratio_mean < 0.99)

        # check the required column resamples
        max_nresamples = np.max(nresample_list)
        assert_equal(max_nresamples, 0)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.7 < proportion_exact < 0.8)

        # check the average number of matrix*vector multiplications
        mean_nmult = np.mean(nmult_list)
        assert_(4 < mean_nmult < 5)

    def _help_product_norm_slow(self, A, B):
        # for profiling
        C = np.dot(A, B)
        return scipy.linalg.norm(C, 1)

    def _help_product_norm_fast(self, A, B):
        # for profiling
        t = 2
        itmax = 5
        D = MatrixProductOperator(A, B)
        est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
        return est

    @pytest.mark.slow
    def test_onenormest_linear_operator(self):
        # Define a matrix through its product A B.
        # Depending on the shapes of A and B,
        # it could be easy to multiply this product by a small matrix,
        # but it could be annoying to look at all of
        # the entries of the product explicitly.
        np.random.seed(1234)
        n = 6000
        k = 3
        A = np.random.randn(n, k)
        B = np.random.randn(k, n)
        fast_estimate = self._help_product_norm_fast(A, B)
        exact_value = self._help_product_norm_slow(A, B)
        assert_(fast_estimate <= exact_value <= 3*fast_estimate,
                f'fast: {fast_estimate:g}\nexact: {exact_value:g}')

    def test_returns(self):
        np.random.seed(1234)
        A = scipy.sparse.rand(50, 50, 0.1)

        s0 = scipy.linalg.norm(A.toarray(), 1)
        s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
        s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
        s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True,
                                                    compute_v=True)

        assert_allclose(s1, s0, rtol=1e-9)
        assert_allclose(np.linalg.norm(A.dot(v), 1),
                        s0 * np.linalg.norm(v, 1), rtol=1e-9)
        assert_allclose(A.dot(v), w, rtol=1e-9)
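        # As the two preceding assertions verify, ``v`` is a vector whose
        # image attains the estimate (``norm(A @ v, 1) == s0 * norm(v, 1)``)
        # and ``w`` is the corresponding image ``A @ v``.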


class TestAlgorithm_2_2:

    def test_randn_inv(self):
        # A smoke test: there are no assertions below, so this only checks
        # that _algorithm_2_2 runs on a variety of shapes and t values.
        np.random.seed(1234)
        n = 20
        nsamples = 100
        for i in range(nsamples):

            # Choose integer t uniformly between 1 and 3 inclusive.
            t = np.random.randint(1, 4)

            # Choose n uniformly between 10 and 40 inclusive.
            n = np.random.randint(10, 41)

            # Sample the inverse of a matrix with random normal entries.
            A = scipy.linalg.inv(np.random.randn(n, n))

            # Compute the 1-norm bounds.
            g, ind = _algorithm_2_2(A, A.T, t)
@ -0,0 +1,166 @@
import os
import pytest

import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy.sparse.linalg._svdp import _svdp
from scipy.sparse import csr_matrix, csc_matrix


# dtype_flavour to tolerance
TOLS = {
    np.float32: 1e-4,
    np.float64: 1e-8,
    np.complex64: 1e-4,
    np.complex128: 1e-8,
}


def is_complex_type(dtype):
    return np.dtype(dtype).kind == "c"


_dtypes = []
for dtype_flavour in TOLS.keys():
    marks = []
    if is_complex_type(dtype_flavour):
        marks = [pytest.mark.slow]
    _dtypes.append(pytest.param(dtype_flavour, marks=marks,
                                id=dtype_flavour.__name__))
_dtypes = tuple(_dtypes)  # type: ignore[assignment]


def generate_matrix(constructor, n, m, f,
                    dtype=float, rseed=0, **kwargs):
    """Generate a random sparse matrix"""
    rng = np.random.RandomState(rseed)
    if is_complex_type(dtype):
        M = (- 5 + 10 * rng.rand(n, m)
             - 5j + 10j * rng.rand(n, m)).astype(dtype)
    else:
        M = (-5 + 10 * rng.rand(n, m)).astype(dtype)
    M[M.real > 10 * f - 5] = 0
    return constructor(M, **kwargs)


def assert_orthogonal(u1, u2, rtol, atol):
    """Check that the first k rows of u1 and u2 are orthogonal"""
    A = abs(np.dot(u1.conj().T, u2))
    assert_allclose(A, np.eye(u1.shape[1], u2.shape[1]), rtol=rtol, atol=atol)


def check_svdp(n, m, constructor, dtype, k, irl_mode, which, f=0.8):
    tol = TOLS[dtype]

    M = generate_matrix(np.asarray, n, m, f, dtype)
    Msp = constructor(M)

    u1, sigma1, vt1 = np.linalg.svd(M, full_matrices=False)
    u2, sigma2, vt2, _ = _svdp(Msp, k=k, which=which, irl_mode=irl_mode,
                               tol=tol)

    # check the which
    if which.upper() == 'SM':
        u1 = np.roll(u1, k, 1)
        vt1 = np.roll(vt1, k, 0)
        sigma1 = np.roll(sigma1, k)

    # check that singular values agree
    assert_allclose(sigma1[:k], sigma2, rtol=tol, atol=tol)

    # check that singular vectors are orthogonal
    assert_orthogonal(u1, u2, rtol=tol, atol=tol)
    assert_orthogonal(vt1.T, vt2.T, rtol=tol, atol=tol)


@pytest.mark.parametrize('ctor', (np.array, csr_matrix, csc_matrix))
@pytest.mark.parametrize('dtype', _dtypes)
@pytest.mark.parametrize('irl', (True, False))
@pytest.mark.parametrize('which', ('LM', 'SM'))
def test_svdp(ctor, dtype, irl, which):
    np.random.seed(0)
    n, m, k = 10, 20, 3
    if which == 'SM' and not irl:
        message = "`which`='SM' requires irl_mode=True"
        with assert_raises(ValueError, match=message):
            check_svdp(n, m, ctor, dtype, k, irl, which)
    else:
        check_svdp(n, m, ctor, dtype, k, irl, which)
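
# Call-pattern sketch for the private helper exercised above (signature as
# used in these tests, nothing beyond it assumed):
#
#     u, s, vt, _ = _svdp(A, k=3, which='LM', irl_mode=True, tol=1e-8)
#
# returns the k requested singular triplets of ``A``, computed via PROPACK's
# Lanczos bidiagonalization.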


@pytest.mark.xslow
@pytest.mark.parametrize('dtype', _dtypes)
@pytest.mark.parametrize('irl', (False, True))
@pytest.mark.timeout(120)  # True, complex64 > 60 s: prerel deps cov 64bit blas
def test_examples(dtype, irl):
    # Note: atol for complex64 bumped from 1e-4 to 1e-3 due to test failures
    # with BLIS, Netlib, and MKL+AVX512 - see
    # https://github.com/conda-forge/scipy-feedstock/pull/198#issuecomment-999180432
    atol = {
        np.float32: 1.3e-4,
        np.float64: 1e-9,
        np.complex64: 1e-3,
        np.complex128: 1e-9,
    }[dtype]

    path_prefix = os.path.dirname(__file__)
    # Test matrices from `illc1850.coord` and `mhd1280b.cua` distributed with
    # PROPACK 2.1: http://sun.stanford.edu/~rmunk/PROPACK/
    relative_path = "propack_test_data.npz"
    filename = os.path.join(path_prefix, relative_path)
    with np.load(filename, allow_pickle=True) as data:
        if is_complex_type(dtype):
            A = data['A_complex'].item().astype(dtype)
        else:
            A = data['A_real'].item().astype(dtype)

    k = 200
    u, s, vh, _ = _svdp(A, k, irl_mode=irl, random_state=0)

    # complex example matrix has many repeated singular values, so check only
    # beginning non-repeated singular vectors to avoid permutations
    sv_check = 27 if is_complex_type(dtype) else k
    u = u[:, :sv_check]
    vh = vh[:sv_check, :]
    s = s[:sv_check]

    # Check orthogonality of singular vectors
    assert_allclose(np.eye(u.shape[1]), u.conj().T @ u, atol=atol)
    assert_allclose(np.eye(vh.shape[0]), vh @ vh.conj().T, atol=atol)

    # Ensure the norm of the difference between the np.linalg.svd and
    # PROPACK reconstructed matrices is small
    u3, s3, vh3 = np.linalg.svd(A.todense())
    u3 = u3[:, :sv_check]
    s3 = s3[:sv_check]
    vh3 = vh3[:sv_check, :]
    A3 = u3 @ np.diag(s3) @ vh3
    recon = u @ np.diag(s) @ vh
    assert_allclose(np.linalg.norm(A3 - recon), 0, atol=atol)


@pytest.mark.parametrize('shifts', (None, -10, 0, 1, 10, 70))
@pytest.mark.parametrize('dtype', _dtypes[:2])
def test_shifts(shifts, dtype):
    np.random.seed(0)
    n, k = 70, 10
    A = np.random.random((n, n))
    if shifts is not None and ((shifts < 0) or (k > min(n-1-shifts, n))):
        with pytest.raises(ValueError):
            _svdp(A, k, shifts=shifts, kmax=5*k, irl_mode=True)
    else:
        _svdp(A, k, shifts=shifts, kmax=5*k, irl_mode=True)


@pytest.mark.slow
@pytest.mark.xfail()
def test_shifts_accuracy():
    np.random.seed(0)
    n, k = 70, 10
    A = np.random.random((n, n)).astype(np.float64)
    u1, s1, vt1, _ = _svdp(A, k, shifts=None, which='SM', irl_mode=True)
    u2, s2, vt2, _ = _svdp(A, k, shifts=32, which='SM', irl_mode=True)
    # shifts <= 32 doesn't agree with shifts > 32
    # Does agree when which='LM' instead of 'SM'
    assert_allclose(s1, s2)
@ -0,0 +1,243 @@
import pytest

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as splin

from numpy.testing import assert_allclose, assert_equal

try:
    import sparse
except Exception:
    sparse = None

pytestmark = pytest.mark.skipif(sparse is None,
                                reason="pydata/sparse not installed")


msg = "pydata/sparse (0.15.1) does not implement necessary operations"


sparse_params = (pytest.param("COO"),
                 pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)]))

scipy_sparse_classes = [
    sp.bsr_matrix,
    sp.csr_matrix,
    sp.coo_matrix,
    sp.csc_matrix,
    sp.dia_matrix,
    sp.dok_matrix
]


@pytest.fixture(params=sparse_params)
def sparse_cls(request):
    return getattr(sparse, request.param)


@pytest.fixture(params=scipy_sparse_classes)
def sp_sparse_cls(request):
    return request.param


@pytest.fixture
def same_matrix(sparse_cls, sp_sparse_cls):
    np.random.seed(1234)
    A_dense = np.random.rand(9, 9)
    return sp_sparse_cls(A_dense), sparse_cls(A_dense)


@pytest.fixture
def matrices(sparse_cls):
    np.random.seed(1234)
    A_dense = np.random.rand(9, 9)
    A_dense = A_dense @ A_dense.T
    A_sparse = sparse_cls(A_dense)
    b = np.random.rand(9)
    return A_dense, A_sparse, b
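
# Note: the ``A_dense @ A_dense.T`` step in the ``matrices`` fixture above
# makes the test matrix symmetric positive (semi)definite, which the
# ``eigsh`` and ``lobpcg`` tests below rely on.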


def test_isolve_gmres(matrices):
    # Several of the iterative solvers use the same
    # isolve.utils.make_system wrapper code, so test just one of them.
    A_dense, A_sparse, b = matrices
    x, info = splin.gmres(A_sparse, b, atol=1e-15)
    assert info == 0
    assert isinstance(x, np.ndarray)
    assert_allclose(A_sparse @ x, b)


def test_lsmr(matrices):
    A_dense, A_sparse, b = matrices
    res0 = splin.lsmr(A_dense, b)
    res = splin.lsmr(A_sparse, b)
    assert_allclose(res[0], res0[0], atol=1e-3)


# test issue 17012
def test_lsmr_output_shape():
    x = splin.lsmr(A=np.ones((10, 1)), b=np.zeros(10), x0=np.ones(1))[0]
    assert_equal(x.shape, (1,))


def test_lsqr(matrices):
    A_dense, A_sparse, b = matrices
    res0 = splin.lsqr(A_dense, b)
    res = splin.lsqr(A_sparse, b)
    assert_allclose(res[0], res0[0], atol=1e-5)


def test_eigs(matrices):
    A_dense, A_sparse, v0 = matrices

    M_dense = np.diag(v0**2)
    M_sparse = A_sparse.__class__(M_dense)

    w_dense, v_dense = splin.eigs(A_dense, k=3, v0=v0)
    w, v = splin.eigs(A_sparse, k=3, v0=v0)

    assert_allclose(w, w_dense)
    assert_allclose(v, v_dense)

    for M in [M_sparse, M_dense]:
        w_dense, v_dense = splin.eigs(A_dense, M=M_dense, k=3, v0=v0)
        w, v = splin.eigs(A_sparse, M=M, k=3, v0=v0)

        assert_allclose(w, w_dense)
        assert_allclose(v, v_dense)

        w_dense, v_dense = splin.eigsh(A_dense, M=M_dense, k=3, v0=v0)
        w, v = splin.eigsh(A_sparse, M=M, k=3, v0=v0)

        assert_allclose(w, w_dense)
        assert_allclose(v, v_dense)


def test_svds(matrices):
    A_dense, A_sparse, v0 = matrices

    u0, s0, vt0 = splin.svds(A_dense, k=2, v0=v0)
    u, s, vt = splin.svds(A_sparse, k=2, v0=v0)

    assert_allclose(s, s0)
    assert_allclose(np.abs(u), np.abs(u0))
    assert_allclose(np.abs(vt), np.abs(vt0))
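    # The singular vectors are compared through ``np.abs`` because each one
    # is determined only up to a sign (a unit phase factor in the complex
    # case).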


def test_lobpcg(matrices):
    A_dense, A_sparse, x = matrices
    X = x[:, None]

    w_dense, v_dense = splin.lobpcg(A_dense, X)
    w, v = splin.lobpcg(A_sparse, X)

    assert_allclose(w, w_dense)
    assert_allclose(v, v_dense)


def test_spsolve(matrices):
    A_dense, A_sparse, b = matrices
    b2 = np.random.rand(len(b), 3)

    x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
    x = splin.spsolve(A_sparse, b)
    assert isinstance(x, np.ndarray)
    assert_allclose(x, x0)

    x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
    x = splin.spsolve(A_sparse, b, use_umfpack=True)
    assert isinstance(x, np.ndarray)
    assert_allclose(x, x0)

    x0 = splin.spsolve(sp.csc_matrix(A_dense), b2)
    x = splin.spsolve(A_sparse, b2)
    assert isinstance(x, np.ndarray)
    assert_allclose(x, x0)

    x0 = splin.spsolve(sp.csc_matrix(A_dense),
                       sp.csc_matrix(A_dense))
    x = splin.spsolve(A_sparse, A_sparse)
    assert isinstance(x, type(A_sparse))
    assert_allclose(x.todense(), x0.todense())


def test_splu(matrices):
    A_dense, A_sparse, b = matrices
    n = len(b)
    sparse_cls = type(A_sparse)

    lu = splin.splu(A_sparse)

    assert isinstance(lu.L, sparse_cls)
    assert isinstance(lu.U, sparse_cls)

    # Reconstruct A from the factorization as Pr.T @ L @ U @ Pc.T, where the
    # permutation matrices are built from the row/column permutation vectors.
    _Pr_scipy = sp.csc_matrix((np.ones(n), (lu.perm_r, np.arange(n))))
    _Pc_scipy = sp.csc_matrix((np.ones(n), (np.arange(n), lu.perm_c)))
    Pr = sparse_cls.from_scipy_sparse(_Pr_scipy)
    Pc = sparse_cls.from_scipy_sparse(_Pc_scipy)
    A2 = Pr.T @ lu.L @ lu.U @ Pc.T

    assert_allclose(A2.todense(), A_sparse.todense())

    z = lu.solve(A_sparse.todense())
    assert_allclose(z, np.eye(n), atol=1e-10)


def test_spilu(matrices):
    A_dense, A_sparse, b = matrices
    sparse_cls = type(A_sparse)

    lu = splin.spilu(A_sparse)

    assert isinstance(lu.L, sparse_cls)
    assert isinstance(lu.U, sparse_cls)

    # The incomplete factorization is only approximate, hence the loose atol.
    z = lu.solve(A_sparse.todense())
    assert_allclose(z, np.eye(len(b)), atol=1e-3)


def test_spsolve_triangular(matrices):
    A_dense, A_sparse, b = matrices
    A_sparse = sparse.tril(A_sparse)

    x = splin.spsolve_triangular(A_sparse, b)
    assert_allclose(A_sparse @ x, b)


def test_onenormest(matrices):
    A_dense, A_sparse, b = matrices
    est0 = splin.onenormest(A_dense)
    est = splin.onenormest(A_sparse)
    assert_allclose(est, est0)


def test_inv(matrices):
    A_dense, A_sparse, b = matrices
    x0 = splin.inv(sp.csc_matrix(A_dense))
    x = splin.inv(A_sparse)
    assert_allclose(x.todense(), x0.todense())


def test_expm(matrices):
    A_dense, A_sparse, b = matrices
    x0 = splin.expm(sp.csc_matrix(A_dense))
    x = splin.expm(A_sparse)
    assert_allclose(x.todense(), x0.todense())


def test_expm_multiply(matrices):
    A_dense, A_sparse, b = matrices
    x0 = splin.expm_multiply(A_dense, b)
    x = splin.expm_multiply(A_sparse, b)
    assert_allclose(x, x0)


def test_eq(same_matrix):
    sp_sparse, pd_sparse = same_matrix
    assert (sp_sparse == pd_sparse).all()


def test_ne(same_matrix):
    sp_sparse, pd_sparse = same_matrix
    assert not (sp_sparse != pd_sparse).any()
@ -0,0 +1,337 @@
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose

from scipy.sparse import diags, csgraph
from scipy.linalg import eigh

from scipy.sparse.linalg import LaplacianNd
from scipy.sparse.linalg._special_sparse_arrays import Sakurai
from scipy.sparse.linalg._special_sparse_arrays import MikotaPair

INT_DTYPES = [np.int8, np.int16, np.int32, np.int64]
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
ALLDTYPES = INT_DTYPES + REAL_DTYPES + COMPLEX_DTYPES


class TestLaplacianNd:
    """
    LaplacianNd tests
    """

    @pytest.mark.parametrize('bc', ['neumann', 'dirichlet', 'periodic'])
    def test_1d_specific_shape(self, bc):
        lap = LaplacianNd(grid_shape=(6, ), boundary_conditions=bc)
        lapa = lap.toarray()
        if bc == 'neumann':
            a = np.array(
                [
                    [-1, 1, 0, 0, 0, 0],
                    [1, -2, 1, 0, 0, 0],
                    [0, 1, -2, 1, 0, 0],
                    [0, 0, 1, -2, 1, 0],
                    [0, 0, 0, 1, -2, 1],
                    [0, 0, 0, 0, 1, -1],
                ]
            )
        elif bc == 'dirichlet':
            a = np.array(
                [
                    [-2, 1, 0, 0, 0, 0],
                    [1, -2, 1, 0, 0, 0],
                    [0, 1, -2, 1, 0, 0],
                    [0, 0, 1, -2, 1, 0],
                    [0, 0, 0, 1, -2, 1],
                    [0, 0, 0, 0, 1, -2],
                ]
            )
        else:
            a = np.array(
                [
                    [-2, 1, 0, 0, 0, 1],
                    [1, -2, 1, 0, 0, 0],
                    [0, 1, -2, 1, 0, 0],
                    [0, 0, 1, -2, 1, 0],
                    [0, 0, 0, 1, -2, 1],
                    [1, 0, 0, 0, 1, -2],
                ]
            )
        assert_array_equal(a, lapa)

    def test_1d_with_graph_laplacian(self):
        n = 6
        G = diags(np.ones(n - 1), 1, format='dia')
        Lf = csgraph.laplacian(G, symmetrized=True, form='function')
        La = csgraph.laplacian(G, symmetrized=True, form='array')
        grid_shape = (n,)
        bc = 'neumann'
        lap = LaplacianNd(grid_shape, boundary_conditions=bc)
        assert_array_equal(lap(np.eye(n)), -Lf(np.eye(n)))
        assert_array_equal(lap.toarray(), -La.toarray())
        # https://github.com/numpy/numpy/issues/24351
        assert_array_equal(lap.tosparse().toarray(), -La.toarray())

    @pytest.mark.parametrize('grid_shape', [(6, ), (2, 3), (2, 3, 4)])
    @pytest.mark.parametrize('bc', ['neumann', 'dirichlet', 'periodic'])
    def test_eigenvalues(self, grid_shape, bc):
        lap = LaplacianNd(grid_shape, boundary_conditions=bc, dtype=np.float64)
        L = lap.toarray()
        eigvals = eigh(L, eigvals_only=True)
        n = np.prod(grid_shape)
        eigenvalues = lap.eigenvalues()
        dtype = eigenvalues.dtype
        atol = n * n * np.finfo(dtype).eps
        # test the default ``m = None``
        assert_allclose(eigenvalues, eigvals, atol=atol)
        # test every ``m > 0``
        for m in np.arange(1, n + 1):
            assert_array_equal(lap.eigenvalues(m), eigenvalues[-m:])
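
    # Context for the ``eigh`` comparison above: ``lap.eigenvalues()`` is
    # expected to evaluate the spectrum in closed form rather than via a
    # dense solver; e.g. for Dirichlet boundary conditions on an N-point 1-D
    # grid the eigenvalues are -4*sin(pi*k/(2*(N + 1)))**2 for k = 1, ..., N
    # (a standard result, stated here only as background).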

    @pytest.mark.parametrize('grid_shape', [(6, ), (2, 3), (2, 3, 4)])
    @pytest.mark.parametrize('bc', ['neumann', 'dirichlet', 'periodic'])
    def test_eigenvectors(self, grid_shape, bc):
        lap = LaplacianNd(grid_shape, boundary_conditions=bc, dtype=np.float64)
        n = np.prod(grid_shape)
        eigenvalues = lap.eigenvalues()
        eigenvectors = lap.eigenvectors()
        dtype = eigenvectors.dtype
        atol = n * n * max(np.finfo(dtype).eps, np.finfo(np.double).eps)
        # test the default ``m = None``: every individual eigenvector
        for i in np.arange(n):
            r = (lap.toarray() @ eigenvectors[:, i]
                 - eigenvectors[:, i] * eigenvalues[i])
            assert_allclose(r, np.zeros_like(r), atol=atol)
        # test every ``m > 0``
        for m in np.arange(1, n + 1):
            e = lap.eigenvalues(m)
            ev = lap.eigenvectors(m)
            r = lap.toarray() @ ev - ev @ np.diag(e)
            assert_allclose(r, np.zeros_like(r), atol=atol)

    @pytest.mark.parametrize('grid_shape', [(6, ), (2, 3), (2, 3, 4)])
    @pytest.mark.parametrize('bc', ['neumann', 'dirichlet', 'periodic'])
    def test_toarray_tosparse_consistency(self, grid_shape, bc):
        lap = LaplacianNd(grid_shape, boundary_conditions=bc)
        n = np.prod(grid_shape)
        assert_array_equal(lap.toarray(), lap(np.eye(n)))
        assert_array_equal(lap.tosparse().toarray(), lap.toarray())

    @pytest.mark.parametrize('dtype', ALLDTYPES)
    @pytest.mark.parametrize('grid_shape', [(6, ), (2, 3), (2, 3, 4)])
    @pytest.mark.parametrize('bc', ['neumann', 'dirichlet', 'periodic'])
    def test_linearoperator_shape_dtype(self, grid_shape, bc, dtype):
        lap = LaplacianNd(grid_shape, boundary_conditions=bc, dtype=dtype)
        n = np.prod(grid_shape)
        assert lap.shape == (n, n)
        assert lap.dtype == dtype
        assert_array_equal(
            LaplacianNd(
                grid_shape, boundary_conditions=bc, dtype=dtype
            ).toarray(),
            LaplacianNd(grid_shape, boundary_conditions=bc)
            .toarray()
            .astype(dtype),
        )
        assert_array_equal(
            LaplacianNd(grid_shape, boundary_conditions=bc, dtype=dtype)
            .tosparse()
            .toarray(),
            LaplacianNd(grid_shape, boundary_conditions=bc)
            .tosparse()
            .toarray()
            .astype(dtype),
        )

    @pytest.mark.parametrize('dtype', ALLDTYPES)
    @pytest.mark.parametrize('grid_shape', [(6, ), (2, 3), (2, 3, 4)])
    @pytest.mark.parametrize('bc', ['neumann', 'dirichlet', 'periodic'])
    def test_dot(self, grid_shape, bc, dtype):
        """Test the dot-product for type preservation and consistency."""
        lap = LaplacianNd(grid_shape, boundary_conditions=bc)
        n = np.prod(grid_shape)
        x0 = np.arange(n)
        x1 = x0.reshape((-1, 1))
        x2 = np.arange(2 * n).reshape((n, 2))
        input_set = [x0, x1, x2]
        for x in input_set:
            y = lap.dot(x.astype(dtype))
            assert x.shape == y.shape
            assert y.dtype == dtype
            if x.ndim == 2:
                yy = lap.toarray() @ x.astype(dtype)
                assert yy.dtype == dtype
                assert np.allclose(y, yy)

    def test_boundary_conditions_value_error(self):
        with pytest.raises(ValueError, match="Unknown value 'robin'"):
            LaplacianNd(grid_shape=(6, ), boundary_conditions='robin')


class TestSakurai:
    """
    Sakurai tests
    """

    def test_specific_shape(self):
        sak = Sakurai(6)
        assert_array_equal(sak.toarray(), sak(np.eye(6)))
        a = np.array(
            [
                [ 5, -4,  1,  0,  0,  0],
                [-4,  6, -4,  1,  0,  0],
                [ 1, -4,  6, -4,  1,  0],
                [ 0,  1, -4,  6, -4,  1],
                [ 0,  0,  1, -4,  6, -4],
                [ 0,  0,  0,  1, -4,  5]
            ]
        )
        assert_array_equal(a, sak.toarray())
        assert_array_equal(sak.tosparse().toarray(), sak.toarray())
        ab = np.array(
            [
                [ 1,  1,  1,  1,  1,  1],
                [-4, -4, -4, -4, -4, -4],
                [ 5,  6,  6,  6,  6,  5]
            ]
        )
        assert_array_equal(ab, sak.tobanded())
        e = np.array(
            [0.03922866, 0.56703972, 2.41789479, 5.97822974,
             10.54287655, 14.45473055]
        )
        # the reference values above are rounded to 8 decimals, hence the atol
        assert_allclose(e, sak.eigenvalues(), atol=1e-8)
        assert_allclose(e[:2], sak.eigenvalues(2), atol=1e-8)

    # `Sakurai` default `dtype` is `np.int8`, as its entries are small integers
    @pytest.mark.parametrize('dtype', ALLDTYPES)
    def test_linearoperator_shape_dtype(self, dtype):
        n = 7
        sak = Sakurai(n, dtype=dtype)
        assert sak.shape == (n, n)
        assert sak.dtype == dtype
        assert_array_equal(sak.toarray(), Sakurai(n).toarray().astype(dtype))
        assert_array_equal(sak.tosparse().toarray(),
                           Sakurai(n).tosparse().toarray().astype(dtype))

    @pytest.mark.parametrize('dtype', ALLDTYPES)
    @pytest.mark.parametrize('argument_dtype', ALLDTYPES)
    def test_dot(self, dtype, argument_dtype):
        """Test the dot-product for type preservation and consistency."""
        result_dtype = np.promote_types(argument_dtype, dtype)
        n = 5
        sak = Sakurai(n)
        x0 = np.arange(n)
        x1 = x0.reshape((-1, 1))
        x2 = np.arange(2 * n).reshape((n, 2))
        input_set = [x0, x1, x2]
        for x in input_set:
            y = sak.dot(x.astype(argument_dtype))
            assert x.shape == y.shape
            assert np.can_cast(y.dtype, result_dtype)
            if x.ndim == 2:
                ya = sak.toarray() @ x.astype(argument_dtype)
                assert np.allclose(y, ya)
                assert np.can_cast(ya.dtype, result_dtype)
                ys = sak.tosparse() @ x.astype(argument_dtype)
                assert np.allclose(y, ys)
                assert np.can_cast(ys.dtype, result_dtype)


class TestMikotaPair:
    """
    MikotaPair tests
    """
    # Both MikotaPair `LinearOperator`s share the same dtype. While the
    # `MikotaK` dtype can be as small as its default `np.int32`, since its
    # entries are integers, `MikotaM` involves inverses, so its smallest
    # still-accurate dtype is `np.float32`.
    tested_types = REAL_DTYPES + COMPLEX_DTYPES

    def test_specific_shape(self):
        n = 6
        mik = MikotaPair(n)
        mik_k = mik.k
        mik_m = mik.m
        assert_array_equal(mik_k.toarray(), mik_k(np.eye(n)))
        assert_array_equal(mik_m.toarray(), mik_m(np.eye(n)))

        k = np.array(
            [
                [11, -5,  0,  0,  0,  0],
                [-5,  9, -4,  0,  0,  0],
                [ 0, -4,  7, -3,  0,  0],
                [ 0,  0, -3,  5, -2,  0],
                [ 0,  0,  0, -2,  3, -1],
                [ 0,  0,  0,  0, -1,  1]
            ]
        )
        assert_array_equal(k, mik_k.toarray())
        assert_array_equal(mik_k.tosparse().toarray(), k)
        kb = np.array(
            [
                [ 0, -5, -4, -3, -2, -1],
                [11,  9,  7,  5,  3,  1]
            ]
        )
        assert_array_equal(kb, mik_k.tobanded())

        minv = np.arange(1, n + 1)
        assert_allclose(np.diag(1. / minv), mik_m.toarray())
        assert_allclose(mik_m.tosparse().toarray(), mik_m.toarray())
        assert_allclose(1. / minv, mik_m.tobanded())

        e = np.array([1, 4, 9, 16, 25, 36])
        assert_array_equal(e, mik.eigenvalues())
        assert_array_equal(e[:2], mik.eigenvalues(2))
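
    # The generalized eigenvalues of the Mikota pair (k, m) checked above are
    # exactly the squared integers 1, 4, 9, ..., n**2, which is what makes
    # the pair a convenient benchmark with an analytically known spectrum.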

    @pytest.mark.parametrize('dtype', tested_types)
    def test_linearoperator_shape_dtype(self, dtype):
        n = 7
        mik = MikotaPair(n, dtype=dtype)
        mik_k = mik.k
        mik_m = mik.m
        assert mik_k.shape == (n, n)
        assert mik_k.dtype == dtype
        assert mik_m.shape == (n, n)
        assert mik_m.dtype == dtype
        mik_default_dtype = MikotaPair(n)
        mikd_k = mik_default_dtype.k
        mikd_m = mik_default_dtype.m
        assert mikd_k.shape == (n, n)
        assert mikd_k.dtype == np.float64
        assert mikd_m.shape == (n, n)
        assert mikd_m.dtype == np.float64
        assert_array_equal(mik_k.toarray(),
                           mikd_k.toarray().astype(dtype))
        assert_array_equal(mik_k.tosparse().toarray(),
                           mikd_k.tosparse().toarray().astype(dtype))

    @pytest.mark.parametrize('dtype', tested_types)
    @pytest.mark.parametrize('argument_dtype', ALLDTYPES)
    def test_dot(self, dtype, argument_dtype):
        """Test the dot-product for type preservation and consistency."""
        result_dtype = np.promote_types(argument_dtype, dtype)
        n = 5
        mik = MikotaPair(n, dtype=dtype)
        mik_k = mik.k
        mik_m = mik.m
        x0 = np.arange(n)
        x1 = x0.reshape((-1, 1))
        x2 = np.arange(2 * n).reshape((n, 2))
        lo_set = [mik_k, mik_m]
        input_set = [x0, x1, x2]
        for lo in lo_set:
            for x in input_set:
                y = lo.dot(x.astype(argument_dtype))
                assert x.shape == y.shape
                assert np.can_cast(y.dtype, result_dtype)
                if x.ndim == 2:
                    ya = lo.toarray() @ x.astype(argument_dtype)
                    assert np.allclose(y, ya)
                    assert np.can_cast(ya.dtype, result_dtype)
                    ys = lo.tosparse() @ x.astype(argument_dtype)
                    assert np.allclose(y, ys)
                    assert np.can_cast(ys.dtype, result_dtype)