diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 34f458113..5ee1e3422 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -231,6 +231,7 @@ repos: ^python/cucim/src/cucim/skimage/restoration/tests/test_restoration[.]py$| ^python/cucim/src/cucim/skimage/restoration/uft[.]py$| ^python/cucim/src/cucim/skimage/segmentation/__init__[.]py$| + ^python/cucim/src/cucim/skimage/segmentation/__init__[.]pyi$| ^python/cucim/src/cucim/skimage/segmentation/_chan_vese[.]py$| ^python/cucim/src/cucim/skimage/segmentation/_clear_border[.]py$| ^python/cucim/src/cucim/skimage/segmentation/_join[.]py$| @@ -509,6 +510,7 @@ repos: python/cucim/src/cucim/skimage/restoration/tests/test_restoration[.]py$| python/cucim/src/cucim/skimage/restoration/uft[.]py$| python/cucim/src/cucim/skimage/segmentation/__init__[.]py$| + python/cucim/src/cucim/skimage/segmentation/__init__[.]pyi$| python/cucim/src/cucim/skimage/segmentation/_chan_vese[.]py$| python/cucim/src/cucim/skimage/segmentation/_clear_border[.]py$| python/cucim/src/cucim/skimage/segmentation/_join[.]py$| diff --git a/conda/environments/all_cuda-129_arch-aarch64.yaml b/conda/environments/all_cuda-129_arch-aarch64.yaml index 7ffd6cd09..53e27ab69 100644 --- a/conda/environments/all_cuda-129_arch-aarch64.yaml +++ b/conda/environments/all_cuda-129_arch-aarch64.yaml @@ -39,7 +39,7 @@ dependencies: - python>=3.11,<3.14 - pywavelets>=1.6 - recommonmark -- scikit-image>=0.19.0,<0.26.0 +- scikit-image>=0.23.2,<0.27.0 - scipy>=1.11.2 - sphinx>=8.0.0,<8.2.0 - sysroot_linux-aarch64==2.28 diff --git a/conda/environments/all_cuda-129_arch-x86_64.yaml b/conda/environments/all_cuda-129_arch-x86_64.yaml index 09a90199f..ccc9c0f96 100644 --- a/conda/environments/all_cuda-129_arch-x86_64.yaml +++ b/conda/environments/all_cuda-129_arch-x86_64.yaml @@ -40,7 +40,7 @@ dependencies: - python>=3.11,<3.14 - pywavelets>=1.6 - recommonmark -- scikit-image>=0.19.0,<0.26.0 +- scikit-image>=0.23.2,<0.27.0 - scipy>=1.11.2 - 
sphinx>=8.0.0,<8.2.0 - sysroot_linux-64==2.28 diff --git a/conda/environments/all_cuda-131_arch-aarch64.yaml b/conda/environments/all_cuda-131_arch-aarch64.yaml index e4c06914c..76d77ca9e 100644 --- a/conda/environments/all_cuda-131_arch-aarch64.yaml +++ b/conda/environments/all_cuda-131_arch-aarch64.yaml @@ -39,7 +39,7 @@ dependencies: - python>=3.11,<3.14 - pywavelets>=1.6 - recommonmark -- scikit-image>=0.19.0,<0.26.0 +- scikit-image>=0.23.2,<0.27.0 - scipy>=1.11.2 - sphinx>=8.0.0,<8.2.0 - sysroot_linux-aarch64==2.28 diff --git a/conda/environments/all_cuda-131_arch-x86_64.yaml b/conda/environments/all_cuda-131_arch-x86_64.yaml index 74ebe3b8d..adf16ff1c 100644 --- a/conda/environments/all_cuda-131_arch-x86_64.yaml +++ b/conda/environments/all_cuda-131_arch-x86_64.yaml @@ -40,7 +40,7 @@ dependencies: - python>=3.11,<3.14 - pywavelets>=1.6 - recommonmark -- scikit-image>=0.19.0,<0.26.0 +- scikit-image>=0.23.2,<0.27.0 - scipy>=1.11.2 - sphinx>=8.0.0,<8.2.0 - sysroot_linux-64==2.28 diff --git a/dependencies.yaml b/dependencies.yaml index 25f1376c6..d0bb185fe 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -237,7 +237,7 @@ dependencies: - click - lazy-loader>=0.4 - numpy>=1.23.4,<3.0 - - scikit-image>=0.19.0,<0.26.0 + - scikit-image>=0.23.2,<0.27.0 - scipy>=1.11.2 - output_types: conda packages: diff --git a/python/cucim/pyproject.toml b/python/cucim/pyproject.toml index fbfa74f83..9f6565d57 100644 --- a/python/cucim/pyproject.toml +++ b/python/cucim/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ "lazy-loader>=0.4", "numpy>=1.23.4,<3.0", "nvidia-nvimgcodec-cu13>=0.7.0,<0.8.0", - "scikit-image>=0.19.0,<0.26.0", + "scikit-image>=0.23.2,<0.27.0", "scipy>=1.11.2", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
classifiers = [ diff --git a/python/cucim/src/cucim/skimage/_shared/compat.py b/python/cucim/src/cucim/skimage/_shared/compat.py index 5795b0186..d2f04740d 100644 --- a/python/cucim/src/cucim/skimage/_shared/compat.py +++ b/python/cucim/src/cucim/skimage/_shared/compat.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """Compatibility helpers for dependencies.""" @@ -28,6 +28,7 @@ # check CuPy instead of SciPy CUPY_LT_14 = parse(cp.__version__) < parse("14.0.0a1") + # Starting in SciPy v1.12, 'scipy.sparse.linalg.cg' keyword argument `tol` is # deprecated in favor of `rtol`. The corresponding change in cupyx.scipy.sparse # was made in v14.0 diff --git a/python/cucim/src/cucim/skimage/_shared/coord.py b/python/cucim/src/cucim/skimage/_shared/coord.py index 3cae908a1..9f91d3b2b 100644 --- a/python/cucim/src/cucim/skimage/_shared/coord.py +++ b/python/cucim/src/cucim/skimage/_shared/coord.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -22,7 +22,7 @@ def _ensure_spacing(coord, spacing, p_norm, max_out): A finite large p may cause a ValueError if overflow can occur. ``inf`` corresponds to the Chebyshev distance and 2 to the Euclidean distance. - max_out: int + max_out : int If not None, at most the first ``max_out`` candidates are returned. 
diff --git a/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py b/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py index 55033d363..37ffbe6de 100644 --- a/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py +++ b/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause - +import re import sys import warnings @@ -12,11 +12,14 @@ from cucim.skimage._shared import testing from cucim.skimage._shared.utils import ( DEPRECATED, + FailedEstimation, + FailedEstimationAccessError, _supported_float_type, _validate_interpolation_order, change_default_value, channel_as_last_axis, check_nD, + deprecate_func, deprecate_parameter, ) @@ -34,7 +37,9 @@ def test_change_default_value(): - @change_default_value("arg1", new_value=-1, changed_version="0.12") + @change_default_value( + "arg1", new_value=-1, changed_version="0.12", stacklevel=2 + ) def foo(arg0, arg1=0, arg2=1): """Expected docstring""" return arg0, arg1, arg2 @@ -44,6 +49,7 @@ def foo(arg0, arg1=0, arg2=1): new_value=-1, changed_version="0.12", warning_msg="Custom warning message", + stacklevel=2, ) def bar(arg0, arg1=0, arg2=1): """Expected docstring""" @@ -202,6 +208,29 @@ def test_decorated_channel_axis_shape(channel_axis): assert size == x.shape[channel_axis] +@deprecate_func( + deprecated_version="x", removed_version="y", hint="You are on your own." +) +def _deprecated_func(): + """Dummy function used in `test_deprecate_func`. + + The decorated function must be outside the test function, otherwise it + seems that the warning does not point at the calling location. 
+ """ + + +def test_deprecate_func(): + with pytest.warns(FutureWarning) as record: + _deprecated_func() + testing.assert_stacklevel(record) + + assert len(record) == 1 + assert record[0].message.args[0] == ( + "`_deprecated_func` is deprecated since version x and will be removed in " + "version y. You are on your own." + ) + + @deprecate_parameter("old1", start_version="0.10", stop_version="0.12") @deprecate_parameter("old0", start_version="0.10", stop_version="0.12") def _func_deprecated_params(arg0, old0=DEPRECATED, old1=DEPRECATED, arg1=None): @@ -478,7 +507,7 @@ def test_wrong_call_signature(self): _func_deprecated_params(1, 2, old0=2) def test_wrong_param_name(self): - with pytest.raises(ValueError, match="'old' is not in list"): + with pytest.raises(ValueError, match="'old' not in parameters"): @deprecate_parameter( "old", start_version="0.10", stop_version="0.12" @@ -486,7 +515,7 @@ def test_wrong_param_name(self): def foo(arg0): pass - with pytest.raises(ValueError, match="'new' is not in list"): + with pytest.raises(ValueError, match="'new' not in parameters"): @deprecate_parameter( "old", new_name="new", start_version="0.10", stop_version="0.12" @@ -509,7 +538,8 @@ def test_stacklevel(self): def foo(arg0, old=DEPRECATED): pass - with pytest.raises(RuntimeError, match="Set stacklevel manually"): + regex = "Cannot determine stacklevel.*Set the stacklevel manually" + with pytest.raises(ValueError, match=regex): foo(0, 1) @deprecate_parameter( @@ -526,3 +556,24 @@ def bar(arg0, old=DEPRECATED): ) as records: bar(0, 1) testing.assert_stacklevel(records) + + +def test_failed_estimation(): + msg = "Something went wrong with estimation" + fe = FailedEstimation(msg) + assert fe.message == msg + assert str(fe) == msg + assert repr(fe).startswith("FailedEstimation(") + assert bool(fe) is False + + regex = re.compile( + "FailedEstimation is not callable.*Hint", flags=re.DOTALL + ) + with pytest.raises(FailedEstimationAccessError, match=regex): + fe(np.ones((10, 2))) 
+ + regex = re.compile( + "FailedEstimation has no attribute 'params'.*Hint", flags=re.DOTALL + ) + with pytest.raises(FailedEstimationAccessError, match=regex): + fe.params diff --git a/python/cucim/src/cucim/skimage/_shared/utils.py b/python/cucim/src/cucim/skimage/_shared/utils.py index fc23d4376..2d4166f48 100644 --- a/python/cucim/src/cucim/skimage/_shared/utils.py +++ b/python/cucim/src/cucim/skimage/_shared/utils.py @@ -1,13 +1,13 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import functools import inspect import numbers -import sys import warnings from collections.abc import Iterable +from contextlib import contextmanager import cupy as cp import numpy as np @@ -53,8 +53,37 @@ def _ndarray_argwhere(a): return cp.stack(cp.nonzero(a), axis=-1) -def _count_wrappers(func): - """Count the number of wrappers around `func`.""" +def count_inner_wrappers(func): + """Count the number of inner wrappers by unpacking ``__wrapped__``. + + If a wrapped function wraps another wrapped function, then we refer to the + wrapping of the second function as an *inner wrapper*. + + For example, consider this code fragment: + + .. code-block:: python + @wrap_outer + @wrap_inner + def foo(): + pass + + Here ``@wrap_inner`` applies a wrapper to ``foo``, and ``@wrap_outer`` + applies a wrapper to the result. + + Parameters + ---------- + func : callable + The callable of which to determine the number of inner wrappers. + + Returns + ------- + count : int + The number of times `func` has been wrapped. 
+ + See Also + -------- + count_global_wrappers + """ unwrapped = func count = 0 while hasattr(unwrapped, "__wrapped__"): @@ -64,14 +93,15 @@ def _count_wrappers(func): def _warning_stacklevel(func): - """Find stacklevel for a warning raised from a wrapper around `func`. + """Find stacklevel of `func` relative to its global representation. - Try to determine the number of + Determine automatically with which stacklevel a warning should be raised. Parameters ---------- func : Callable - + Tries to find the global version of `func` and counts the number of + additional wrappers around `func`. Returns ------- @@ -79,82 +109,104 @@ The stacklevel. Minimum of 2. """ # Count number of wrappers around `func` - wrapped_count = _count_wrappers(func) + inner_wrapped_count = count_inner_wrappers(func) + global_wrapped_count = count_global_wrappers(func) - # Count number of total wrappers around global version of `func` - module = sys.modules.get(func.__module__) - try: - for name in func.__qualname__.split("."): - global_func = getattr(module, name) - except AttributeError as e: - raise RuntimeError( - f"Could not access `{func.__qualname__}` in {module!r}, " - f" may be a closure. Set stacklevel manually. ", - ) from e - else: - global_wrapped_count = _count_wrappers(global_func) - - stacklevel = global_wrapped_count - wrapped_count + 1 + stacklevel = global_wrapped_count - inner_wrapped_count + 1 return max(stacklevel, 2) -def _get_stack_length(func): - """Return function call stack length.""" - _func = func.__globals__.get(func.__name__, func) - length = _count_wrappers(_func) - return length +def count_global_wrappers(func): + """Count the total number of times a function has been wrapped globally. + Similar to :func:`count_inner_wrappers`, this counts the number of times + `func` has been wrapped. However, this function doesn't start counting + from `func` but instead tries to access the "global representation" of + `func`. 
This means that you could use this function from inside a wrapper + that was applied first, and still count wrappers that were applied on + top of it afterwards. -class _DecoratorBaseClass: - """Used to manage decorators' warnings stacklevel. + E.g., `func` might be wrapped by multiple decorators that emit + warnings. In that case, calling this function in the inner-most decorator + will still return the total count of wrappers. - The `_stack_length` class variable is used to store the number of - times a function is wrapped by a decorator. + Parameters + ---------- + func : callable + The callable of which to determine the number of wrappers. Can be a + function or method of a class. - Let `stack_length` be the total number of times a decorated - function is wrapped, and `stack_rank` be the rank of the decorator - in the decorators stack. The stacklevel of a warning is then - `stacklevel = 1 + stack_length - stack_rank`. + Returns + ------- + count : int + The number of times `func` has been wrapped. + + See Also + -------- + count_inner_wrappers """ + if "<locals>" in func.__qualname__: + msg = ( + "Cannot determine stacklevel of a function defined in another " + "function's local namespace. Set the stacklevel manually." + ) + raise ValueError(msg) - _stack_length = {} + first_name, *other = func.__qualname__.split(".") + global_func = func.__globals__.get(first_name, func) - def get_stack_length(self, func): - return self._stack_length.get(func.__name__, _get_stack_length(func)) + # Account for `func` being a method, in which case it's an attribute of + # what we got from `func.__globals__` + for part in other: + global_func = getattr(global_func, part, global_func) + + count = count_inner_wrappers(global_func) + assert count >= 0 + return count -class change_default_value(_DecoratorBaseClass): +class change_default_value: """Decorator for changing the default value of an argument. 
Parameters ---------- - arg_name: str + arg_name : str The name of the argument to be updated. - new_value: any + new_value : any The argument new value. changed_version : str The package version in which the change will be introduced. - warning_msg: str + warning_msg : str Optional warning message. If None, a generic warning message is used. - + stacklevel : {None, int}, optional + If None, the decorator attempts to detect the appropriate stacklevel for the + deprecation warning automatically. This can fail, e.g., due to + decorating a closure, in which case you can set the stacklevel manually + here. The outermost decorator should have stacklevel 2, the next inner + one stacklevel 3, etc. """ def __init__( - self, arg_name, *, new_value, changed_version, warning_msg=None + self, + arg_name, + *, + new_value, + changed_version, + warning_msg=None, + stacklevel=None, ): self.arg_name = arg_name self.new_value = new_value self.warning_msg = warning_msg self.changed_version = changed_version + self.stacklevel = stacklevel def __call__(self, func): parameters = inspect.signature(func).parameters arg_idx = list(parameters.keys()).index(self.arg_name) old_value = parameters[self.arg_name].default - stack_rank = _count_wrappers(func) - if self.warning_msg is None: self.warning_msg = ( f"The new recommended value for {self.arg_name} is " @@ -167,8 +219,12 @@ def __call__(self, func): @functools.wraps(func) def fixed_func(*args, **kwargs): - stacklevel = 1 + self.get_stack_length(func) - stack_rank if len(args) < arg_idx + 1 and self.arg_name not in kwargs.keys(): + stacklevel = ( + self.stacklevel + if self.stacklevel is not None + else _warning_stacklevel(func) + ) # warn that arg_name default value changed: warnings.warn( self.warning_msg, FutureWarning, stacklevel=stacklevel @@ -209,18 +265,23 @@ class deprecate_parameter: template : str, optional If given, this message template is used instead of the default one. 
new_name : str, optional - If given, the default message will recommend the new parameter name and - an error will be raised if the user uses both old and new names for the + If given, the default message will recommend the new parameter name and an + error will be raised if the user uses both old and new names for the same parameter. + value_adapter : callable, optional + If given, this callable is applied to the deprecated parameter's value + before forwarding it to the new parameter. Useful when the old and new + parameters have different semantics (e.g. exclusive vs. inclusive + thresholds). modify_docstring : bool, optional If the wrapped function has a docstring, add the deprecated parameters to the "Other Parameters" section. - stacklevel : int, optional - This decorator attempts to detect the appropriate stacklevel for the - deprecation warning automatically. If this fails, e.g., due to - decorating a closure, you can set the stacklevel manually. The - outermost decorator should have stacklevel 2, the next inner one - stacklevel 3, etc. + stacklevel : {None, int}, optional + If None, the decorator attempts to detect the appropriate stacklevel for the + deprecation warning automatically. This can fail, e.g., due to + decorating a closure, in which case you can set the stacklevel manually + here. The outermost decorator should have stacklevel 2, the next inner + one stacklevel 3, etc. 
Notes ----- @@ -272,11 +333,13 @@ def __init__( stop_version, template=None, new_name=None, + value_adapter=None, modify_docstring=True, stacklevel=None, ): self.deprecated_name = deprecated_name self.new_name = new_name + self.value_adapter = value_adapter self.template = template self.start_version = start_version self.stop_version = stop_version @@ -285,17 +348,24 @@ def __init__( def __call__(self, func): parameters = inspect.signature(func).parameters - deprecated_idx = list(parameters.keys()).index(self.deprecated_name) + try: + deprecated_idx = list(parameters.keys()).index(self.deprecated_name) + except ValueError as e: + raise ValueError( + f"{self.deprecated_name!r} not in parameters" + ) from e + + new_idx = False if self.new_name: - new_idx = list(parameters.keys()).index(self.new_name) - else: - new_idx = False + try: + new_idx = list(parameters.keys()).index(self.new_name) + except ValueError as e: + raise ValueError(f"{self.new_name!r} not in parameters") from e if parameters[self.deprecated_name].default is not DEPRECATED: raise RuntimeError( - f"Expected `{self.deprecated_name}` to have the value " - f"{DEPRECATED!r} to indicate its status in the rendered " - "signature." + f"Expected `{self.deprecated_name}` to have the value {DEPRECATED!r} " + f"to indicate its status in the rendered signature." ) if self.template is not None: @@ -358,8 +428,10 @@ def fixed_func(*args, **kwargs): f"only the latter to avoid conflicting values." ) elif self.new_name is not None: - # Assign old value to new one - kwargs[self.new_name] = deprecated_value + value = deprecated_value + if self.value_adapter is not None: + value = self.value_adapter(deprecated_value) + kwargs[self.new_name] = value return func(*args, **kwargs) @@ -380,7 +452,7 @@ def _docstring_add_deprecated(func, kwarg_mapping, deprecated_version): func : function The function whose docstring we wish to update. 
kwarg_mapping : dict - A dict containing {old_arg: new_arg} key/value pairs as used by + A dict containing {old_arg: new_arg} key/value pairs, see `deprecate_parameter`. deprecated_version : str A major.minor version string specifying when old_arg was @@ -440,6 +512,90 @@ def _docstring_add_deprecated(func, kwarg_mapping, deprecated_version): return final_docstring +class FailedEstimationAccessError(AttributeError): + """Error from use of failed estimation instance + + This error arises from attempts to use an instance of + :class:`FailedEstimation`. + """ + + +class FailedEstimation: + """Class to indicate a failed transform estimation. + + The ``from_estimate`` class method of each transform type may return an + instance of this class to indicate some failure in the estimation process. + + Parameters + ---------- + message : str + Message indicating reason for failed estimation. + + Attributes + ---------- + message : str + Message above. + + Raises + ------ + FailedEstimationAccessError + Exception raised for missing attributes or if the instance is used as a + callable. + """ + + error_cls = FailedEstimationAccessError + + hint = ( + "You can check for a failed estimation by truth testing the returned " + "object. For failed estimations, `bool(estimation_result)` will be `False`. " + "E.g.\n\n" + " if not estimation_result:\n" + " raise RuntimeError(f'Failed estimation: {estimation_result}')" + ) + + def __init__(self, message): + self.message = message + + def __bool__(self): + return False + + def __repr__(self): + return f"{type(self).__name__}({self.message!r})" + + def __str__(self): + return self.message + + def __call__(self, *args, **kwargs): + msg = ( + f"{type(self).__name__} is not callable. {self.message}\n\n" + f"Hint: {self.hint}" + ) + raise self.error_cls(msg) + + def __getattr__(self, name): + msg = ( + f"{type(self).__name__} has no attribute {name!r}. 
{self.message}\n\n" + f"Hint: {self.hint}" + ) + raise self.error_cls(msg) + + +@contextmanager +def _ignore_deprecated_estimate_warning(): + """Filter warnings about the deprecated `estimate` method. + + Use either as decorator or context manager. + """ + with warnings.catch_warnings(): + warnings.filterwarnings( + action="ignore", + category=FutureWarning, + message="`estimate` is deprecated", + module="skimage", + ) + yield + + class channel_as_last_axis: """Decorator for automatically making channels axis last for all arrays. @@ -522,7 +678,7 @@ def fixed_func(*args, **kwargs): return fixed_func -class deprecate_func(_DecoratorBaseClass): +class deprecate_func: """Decorate a deprecated function and warn when it is called. Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>. @@ -536,6 +692,13 @@ class deprecate_func(_DecoratorBaseClass): hint : str, optional A hint on how to address this deprecation, e.g., "Use `skimage.submodule.alternative_func` instead." + stacklevel : {None, int}, optional + If None, the decorator attempts to detect the appropriate stacklevel for the + deprecation warning automatically. This can fail, e.g., due to + decorating a closure, in which case you can set the stacklevel manually + here. The outermost decorator should have stacklevel 2, the next inner + one stacklevel 3, etc. + Examples -------- >>> @deprecate_func( @@ -545,39 +708,49 @@ class deprecate_func(_DecoratorBaseClass): ... ) ... def foo(): ... pass + Calling ``foo`` will warn with:: + FutureWarning: `foo` is deprecated since version 1.0.0 and will be removed in version 1.2.0. Use `bar` instead. 
""" - def __init__(self, *, deprecated_version, removed_version=None, hint=None): + def __init__( + self, + *, + deprecated_version, + removed_version=None, + hint=None, + stacklevel=None, + ): self.deprecated_version = deprecated_version self.removed_version = removed_version self.hint = hint + self.stacklevel = stacklevel def __call__(self, func): - message = ( - f"`{func.__name__}` is deprecated since version " - f"{self.deprecated_version}" - ) + message = f"`{func.__name__}` is deprecated since version {self.deprecated_version}" if self.removed_version: message += ( f" and will be removed in version {self.removed_version}." ) if self.hint: + # Prepend space and make sure it closes with "." message += f" {self.hint.rstrip('.')}." - stack_rank = _count_wrappers(func) - @functools.wraps(func) def wrapped(*args, **kwargs): - stacklevel = 1 + self.get_stack_length(func) - stack_rank + stacklevel = ( + self.stacklevel + if self.stacklevel is not None + else _warning_stacklevel(func) + ) warnings.warn( message, category=FutureWarning, stacklevel=stacklevel ) return func(*args, **kwargs) - # modify doc string to display deprecation warning + # modify docstring to display deprecation warning doc = f"**Deprecated:** {message}" if wrapped.__doc__ is None: wrapped.__doc__ = doc @@ -587,6 +760,67 @@ def wrapped(*args, **kwargs): return wrapped +def _deprecate_estimate(func, class_name=None): + """Deprecate ``estimate`` method.""" + class_name = ( + func.__qualname__.split(".")[0] if class_name is None else class_name + ) + return deprecate_func( + deprecated_version="0.26", + removed_version="2.2", + hint=f"Please use `{class_name}.from_estimate` class constructor instead.", + stacklevel=2, + )(func) + + +def _deprecate_inherited_estimate(cls): + """Deprecate inherited ``estimate`` instance method. + + This needs a class decorator so we can correctly specify the class of the + `from_estimate` class method in the deprecation message. 
+ """ + + def estimate(self, *args, **kwargs): + return self._estimate(*args, **kwargs) is None + + # The inherited method will always be wrapped by deprecator. + inherited_meth = getattr(cls, "estimate").__wrapped__ + estimate.__doc__ = inherited_meth.__doc__ + estimate.__signature__ = inspect.signature(inherited_meth) + + cls.estimate = _deprecate_estimate(estimate, cls.__name__) + return cls + + +def _update_from_estimate_docstring(cls): + """Fix docstring for inherited ``from_estimate`` class method. + + Even for classes that inherit the `from_estimate` method, and do not + override it, we nevertheless need to change the *docstring* of the + `from_estimate` method to point the user to the current (inheriting) class, + rather than the class in which the method is defined (the inherited class). + + This needs a class decorator so we can modify the docstring of the new + class method. CPython currently does not allow us to modify class method + docstrings by updating ``__doc__``. + """ + + inherited_cmeth = getattr(cls, "from_estimate") + + def from_estimate(cls, *args, **kwargs): + return inherited_cmeth(*args, **kwargs) + + inherited_class_name = inherited_cmeth.__qualname__.split(".")[-2] + + from_estimate.__doc__ = inherited_cmeth.__doc__.replace( + inherited_class_name, cls.__name__ + ) + from_estimate.__signature__ = inspect.signature(inherited_cmeth) + + cls.from_estimate = classmethod(from_estimate) + return cls + + def get_bound_method_class(m): """Return the class for a bound method.""" return m.__self__.__class__ @@ -825,9 +1059,10 @@ def _validate_interpolation_order(image_dtype, order): ---------- image_dtype : dtype Image dtype. - order : int, optional - The order of the spline interpolation. The order has to be in - the range 0-5. See `skimage.transform.warp` for detail. + order : {None, int}, optional + The order of the spline interpolation. The order has to be in the range + 0-5. If ``None`` assume order 0 for Boolean images, otherwise 1. 
See + `skimage.transform.warp` for detail. Returns ------- diff --git a/python/cucim/src/cucim/skimage/color/colorconv.py b/python/cucim/src/cucim/skimage/color/colorconv.py index b2a47b833..29a0ad3d0 100644 --- a/python/cucim/src/cucim/skimage/color/colorconv.py +++ b/python/cucim/src/cucim/skimage/color/colorconv.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """Functions for converting between color spaces. @@ -1898,7 +1898,7 @@ def separate_stains(rgb, conv_matrix, *, channel_axis=-1): rgb : (..., C=3, ...) array_like The image in RGB format. By default, the final dimension denotes channels. - conv_matrix: ndarray + conv_matrix : ndarray The stain separation matrix as described by G. Landini [1]_. channel_axis : int, optional This parameter indicates which axis of the array corresponds to @@ -2008,7 +2008,7 @@ def combine_stains(stains, conv_matrix, *, channel_axis=-1): stains : (..., C=3, ...) array_like The image in stain color space. By default, the final dimension denotes channels. - conv_matrix: ndarray + conv_matrix : ndarray The stain separation matrix as described by G. Landini [1]_. channel_axis : int, optional This parameter indicates which axis of the array corresponds to diff --git a/python/cucim/src/cucim/skimage/color/delta_e.py b/python/cucim/src/cucim/skimage/color/delta_e.py index 6a16af034..64333a3e3 100644 --- a/python/cucim/src/cucim/skimage/color/delta_e.py +++ b/python/cucim/src/cucim/skimage/color/delta_e.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """ @@ -16,9 +16,9 @@ The delta-E notation comes from the German word for "Sensation" (Empfindung). -Reference ---------- -https://en.wikipedia.org/wiki/Color_difference +References +---------- +.. [1] https://en.wikipedia.org/wiki/Color_difference """ diff --git a/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py b/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py index 3a2179563..4a8133cc3 100644 --- a/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py +++ b/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py @@ -1,9 +1,11 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """Test for correctness of color distance functions""" +from pathlib import Path + import cupy as cp import numpy as np import pytest @@ -12,7 +14,7 @@ assert_array_almost_equal, ) -from cucim.skimage._shared.testing import expected_warnings, fetch +from cucim.skimage._shared.testing import expected_warnings from cucim.skimage._shared.utils import _supported_float_type from cucim.skimage.color.delta_e import ( deltaE_cie76, @@ -21,6 +23,8 @@ deltaE_cmc, ) +test_data_dir = Path(__file__).absolute().parent + @pytest.mark.parametrize("channel_axis", [0, 1, -1]) @pytest.mark.parametrize("dtype", [cp.float32, cp.float64]) @@ -78,8 +82,7 @@ def load_ciede2000_data(): ] # note: ciede_test_data.txt contains several intermediate quantities - path = fetch("color/tests/ciede2000_test_data.txt") - return np.loadtxt(path, dtype=dtype) + return np.loadtxt(test_data_dir / "ciede2000_test_data.txt", dtype=dtype) @pytest.mark.parametrize("channel_axis", [0, 1, -1]) diff --git a/python/cucim/src/cucim/skimage/data/_binary_blobs.py 
b/python/cucim/src/cucim/skimage/data/_binary_blobs.py index 030305cb0..8b0537c03 100644 --- a/python/cucim/src/cucim/skimage/data/_binary_blobs.py +++ b/python/cucim/src/cucim/skimage/data/_binary_blobs.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause +import warnings import cupy as cp @@ -13,6 +14,8 @@ def binary_blobs( n_dim=2, volume_fraction=0.5, rng=None, + *, + boundary_mode="nearest", ): """ Generate synthetic binary image with several rounded blob-like objects. @@ -33,6 +36,20 @@ def binary_blobs( Pseudo-random number generator. By default, a PCG64 generator is used (see :func:`cupy.random.default_rng`). If `rng` is an int, it is used to seed the generator. + boundary_mode : {'nearest', 'wrap'}, optional + The blobs are created by smoothing and then thresholding an + array consisting of ones at seed positions. This mode determines which values are + filled in when the smoothing kernel overlaps the seed array's boundary. + + 'nearest' (`a a a a | a b c d | d d d d`) + By default, when applying the Gaussian filter, the seed array is extended by replicating the last + boundary value. This will increase the size of blobs whose seed or + center lies exactly on the edge. + + 'wrap' (`a b c d | a b c d | a b c d`) + The seed array is extended by wrapping around to the opposite edge. + The resulting blob array can be tiled and blobs will be contiguous and + have smooth edges across tile boundaries. 
Returns ------- @@ -61,12 +78,35 @@ def binary_blobs( >>> # Blobs cover a smaller volume fraction of the image >>> blobs = data.binary_blobs(length=256, volume_fraction=0.3) """ # noqa: E501 + if boundary_mode not in {"nearest", "wrap"}: + raise ValueError(f"unsupported `boundary_mode`: {boundary_mode!r}") + + blob_size = blob_size_fraction * length + if blob_size < 0.1: + clamped_size_fraction = 0.1 / length + clamped_blob_size = clamped_size_fraction * length + warnings.warn( + f"`{blob_size_fraction=}` together with `{length=}` would result in a blob " + f"size of {blob_size} pixels. Small blob sizes likely lead to unexpected " + f"results! " + f"Clamping to `blob_size_fraction={clamped_size_fraction}` and a blob size " + f"of {clamped_blob_size} pixels to avoid allocating excessive memory.", + category=RuntimeWarning, + stacklevel=2, + ) + blob_size_fraction = clamped_size_fraction + rs = cp.random.default_rng(rng) shape = tuple([length] * n_dim) mask = cp.zeros(shape) n_pts = max(int(1.0 / blob_size_fraction) ** n_dim, 1) points = (length * rs.random((n_dim, n_pts))).astype(int) mask[tuple(indices for indices in points)] = 1 - mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction) + mask = gaussian( + mask, + sigma=0.25 * length * blob_size_fraction, + preserve_range=False, + mode=boundary_mode, + ) threshold = cp.percentile(mask, 100 * (1 - volume_fraction)) return cp.logical_not(mask < threshold) diff --git a/python/cucim/src/cucim/skimage/data/tests/test_data.py b/python/cucim/src/cucim/skimage/data/tests/test_data.py index 4ce7b25a9..165cc698b 100644 --- a/python/cucim/src/cucim/skimage/data/tests/test_data.py +++ b/python/cucim/src/cucim/skimage/data/tests/test_data.py @@ -1,11 +1,13 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp +import pytest from numpy.testing import assert_almost_equal from cucim.skimage import data +from cucim.skimage._shared.testing import assert_stacklevel def test_binary_blobs(): @@ -19,3 +21,29 @@ def test_binary_blobs(): length=32, volume_fraction=0.25, n_dim=3 ) assert not cp.all(blobs == other_realization) + + +def test_binary_blobs_boundary(): + # Assert that `boundary_mode="wrap"` decreases the pixel difference on + # opposing borders compared to `boundary_mode="nearest"` + blobs_near = data.binary_blobs(length=300, boundary_mode="nearest", rng=3) + blobs_wrap = data.binary_blobs(length=300, boundary_mode="wrap", rng=3) + + diff_near_ax0 = blobs_near[0, :] ^ blobs_near[-1, :] + diff_wrap_ax0 = blobs_wrap[0, :] ^ blobs_wrap[-1, :] + assert diff_wrap_ax0.sum() < diff_near_ax0.sum() + + diff_near_ax1 = blobs_near[:, 0] ^ blobs_near[:, -1] + diff_wrap_ax1 = blobs_wrap[:, 0] ^ blobs_wrap[:, -1] + assert diff_wrap_ax1.sum() < diff_near_ax1.sum() + + +def test_binary_blobs_small_blob_size(): + # A very small `blob_size_fraction` in relation to `length` will allocate + # excessive memory and likely leads to unexpected results. Check that this + # is gracefully handled + regex = ".* Clamping to .* blob size of 0.1 pixels" + with pytest.warns(RuntimeWarning, match=regex) as record: + result = data.binary_blobs(100, rng=3, blob_size_fraction=0.0009) + assert_stacklevel(record) + cp.testing.assert_array_equal(result, 1) diff --git a/python/cucim/src/cucim/skimage/exposure/exposure.py b/python/cucim/src/cucim/skimage/exposure/exposure.py index f040df7d3..85856637f 100644 --- a/python/cucim/src/cucim/skimage/exposure/exposure.py +++ b/python/cucim/src/cucim/skimage/exposure/exposure.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -74,7 +74,7 @@ def _bincount_histogram(image, source_range, bin_centers=None): ---------- image : array Input image. - source_range : string + source_range : {'image', 'dtype'} 'image' determines the range from the input image. 'dtype' determines the range from the expected range of the images of that data type. @@ -114,7 +114,7 @@ def _get_outer_edges(image, hist_range): ---------- image : ndarray Image for which the histogram is to be computed. - hist_range: 2-tuple of int or None + hist_range : 2-tuple of int or None Range of values covered by the histogram bins. If None, the minimum and maximum values of `image` are used. @@ -169,7 +169,7 @@ def _get_bin_edges(image, nbins, hist_range): Image for which the histogram is to be computed. nbins : int The number of bins. - hist_range: 2-tuple of int + hist_range : 2-tuple of int Range of values covered by the histogram bins. Returns @@ -237,7 +237,7 @@ def histogram( nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. - source_range : string, optional + source_range : {'image', 'dtype'}, optional 'image' (default) determines the range from the input image. 'dtype' determines the range from the expected range of the images of that data type. @@ -317,7 +317,7 @@ def _histogram(image, bins, source_range, normalize): containing the bin centers can also be provided. For images with floating point dtype, this can be an array of bin_edges for use by ``np.histogram``. - source_range : string, optional + source_range : {'image', 'dtype'}, optional 'image' (default) determines the range from the input image. 'dtype' determines the range from the expected range of the images of that data type. 
@@ -659,11 +659,12 @@ def _adjust_gamma_u8(image, gamma, gain): def adjust_gamma(image, gamma=1, gain=1): - """Performs Gamma Correction on the input image. + """Perform gamma correction on the input image. - Also known as Power Law Transform. - This function transforms the input image pixelwise according to the - equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1. + Gamma correction is a power-law transform [1]_. This function + transforms the input `image` pixel-wise according to the power law + ``image**gamma`` after scaling each pixel to the range 0 to 1. Then + it is rescaled to its original range and multiplied by `gain`. Parameters ---------- @@ -697,9 +698,9 @@ def adjust_gamma(image, gamma=1, gain=1): Examples -------- - >>> from skimage import data + >>> import skimage as ski >>> from cucim.skimage import exposure, img_as_float - >>> image = img_as_float(cp.array(data.moon())) + >>> image = img_as_float(cp.array(ski.data.moon())) >>> gamma_corrected = exposure.adjust_gamma(image, 2) >>> # Output is darker for gamma > 1 >>> image.mean() > gamma_corrected.mean() @@ -715,9 +716,8 @@ def adjust_gamma(image, gamma=1, gain=1): else: _assert_non_negative(image) - scale = float( - dtype_limits(image, True)[1] - dtype_limits(image, True)[0] - ) + limits = dtype_limits(image, clip_negative=True) + scale = float(limits[1] - limits[0]) out = (((image / scale) ** gamma) * scale * gain).astype(dtype) diff --git a/python/cucim/src/cucim/skimage/feature/_basic_features.py b/python/cucim/src/cucim/skimage/feature/_basic_features.py index 1e9e43335..8bce71d6e 100644 --- a/python/cucim/src/cucim/skimage/feature/_basic_features.py +++ b/python/cucim/src/cucim/skimage/feature/_basic_features.py @@ -1,10 +1,11 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import functools import itertools import math +import warnings from itertools import combinations_with_replacement import cupy as cp @@ -48,7 +49,7 @@ def _mutiscale_basic_features_singlechannel( sigma_min=0.5, sigma_max=16, num_sigma=None, - num_workers=None, + workers=None, ): """Features for a single channel nd image. @@ -74,9 +75,10 @@ def _mutiscale_basic_features_singlechannel( num_sigma : int, optional Number of values of the Gaussian kernel between sigma_min and sigma_max. If None, sigma_min multiplied by powers of 2 are used. - num_workers : int or None, optional + workers : int or None, optional The number of parallel threads to use. If set to ``None``, the full - set of available cores are used. + set of available cores are used. This argument is not used by the + cuCIM implementation as work is done by the GPU. Returns ------- @@ -113,7 +115,7 @@ def multiscale_basic_features( sigma_min=0.5, sigma_max=16, num_sigma=None, - num_workers=None, + workers=None, *, channel_axis=None, ): @@ -144,9 +146,10 @@ def multiscale_basic_features( num_sigma : int, optional Number of values of the Gaussian kernel between sigma_min and sigma_max. If None, sigma_min multiplied by powers of 2 are used. - num_workers : int or None, optional + workers : int or None, optional The number of parallel threads to use. If set to ``None``, the full - set of available cores are used. + set of available cores are used. This argument is not used by the + cuCIM implementation as work is done by the GPU. channel_axis : int or None, optional If None, the image is assumed to be a grayscale (single channel) image. Otherwise, this parameter indicates which axis of the array corresponds @@ -165,6 +168,11 @@ def multiscale_basic_features( "At least one of `intensity`, `edges` or `textures`" "must be True for features to be computed." 
) + if workers is not None: + warnings.warn( + f"Workers was set to {workers}, but will be ignored by the cuCIM " + "implementation (work is launched on the GPU, not CPU threads)." + ) if channel_axis is None: image = image[..., np.newaxis] channel_axis = -1 @@ -180,7 +188,7 @@ def multiscale_basic_features( sigma_min=sigma_min, sigma_max=sigma_max, num_sigma=num_sigma, - num_workers=num_workers, + workers=workers, ) for dim in range(image.shape[-1]) ) diff --git a/python/cucim/src/cucim/skimage/feature/blob.py b/python/cucim/src/cucim/skimage/feature/blob.py index 4a736f484..0aa4a2463 100644 --- a/python/cucim/src/cucim/skimage/feature/blob.py +++ b/python/cucim/src/cucim/skimage/feature/blob.py @@ -193,14 +193,14 @@ def blob_dog( Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional - The minimum standard deviation for Gaussian kernel. Keep this low to - detect smaller blobs. The standard deviations of the Gaussian filter - are given for each axis as a sequence, or as a single number, in + Minimum standard deviation for Gaussian kernel. Keep this value low to + detect smaller blobs. The standard deviation of the Gaussian kernel + is given either as a sequence for each axis, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to - detect larger blobs. The standard deviations of the Gaussian filter - are given for each axis as a sequence, or as a single number, in + detect larger blobs. The standard deviation of the Gaussian kernel + is given either as a sequence for each axis, or as a single number, in which case it is equal for all axes. 
sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for @@ -384,18 +384,19 @@ def blob_log( Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional - the minimum standard deviation for Gaussian kernel. Keep this low to - detect smaller blobs. The standard deviations of the Gaussian filter - are given for each axis as a sequence, or as a single number, in + Minimum standard deviation for Gaussian kernel. Keep this value low to + detect smaller blobs. The standard deviation of the Gaussian kernel + is given either as a sequence for each axis, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to - detect larger blobs. The standard deviations of the Gaussian filter - are given for each axis as a sequence, or as a single number, in + detect larger blobs. The standard deviation of the Gaussian kernel + is given either as a sequence for each axis, or as a single number, in which case it is equal for all axes. num_sigma : int, optional - The number of intermediate values of standard deviations to consider - between `min_sigma` and `max_sigma`. + The number of evenly spaced values for standard deviation of the + Gaussian kernel to consider on the closed interval + ``[min_sigma, max_sigma]``. threshold : float or None, optional The absolute lower bound for scale space maxima. Local maxima smaller than `threshold` are ignored. Reduce this to detect blobs with lower @@ -554,13 +555,20 @@ def blob_doh( Input grayscale image.Blobs can either be light on dark or vice versa. min_sigma : float, optional The minimum standard deviation for Gaussian Kernel used to compute - Hessian matrix. Keep this low to detect smaller blobs. + Hessian matrix. Keep this value low to detect smaller blobs. 
+ The standard deviation of the Gaussian kernel is given either as a + sequence for each axis, or as a single number, in which case it is + equal for all axes. max_sigma : float, optional The maximum standard deviation for Gaussian Kernel used to compute - Hessian matrix. Keep this high to detect larger blobs. + Hessian matrix. Keep this value high to detect larger blobs. + The standard deviation of the Gaussian kernel is given either as a + sequence for each axis, or as a single number, in which case it is + equal for all axes. num_sigma : int, optional - The number of intermediate values of standard deviations to consider - between `min_sigma` and `max_sigma`. + The number of evenly spaced values for standard deviation of the + Gaussian kernel to consider on the closed interval + ``[min_sigma, max_sigma]``. threshold : float or None, optional The absolute lower bound for scale space maxima. Local maxima smaller than `threshold` are ignored. Reduce this to detect blobs with lower diff --git a/python/cucim/src/cucim/skimage/feature/corner.py b/python/cucim/src/cucim/skimage/feature/corner.py index babe32af4..2d2a960ae 100644 --- a/python/cucim/src/cucim/skimage/feature/corner.py +++ b/python/cucim/src/cucim/skimage/feature/corner.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import functools @@ -292,7 +292,7 @@ def hessian_matrix( forward order of the image axes in gradient computation. 'rc' indicates the use of the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the usage of the last axis initially (Hxx, Hxy, Hyy). 
- use_gaussian_derivatives : boolean, optional + use_gaussian_derivatives : bool, optional Indicates whether the Hessian is computed by convolving with Gaussian derivatives, or by a simple finite-difference operation. @@ -543,7 +543,7 @@ def _image_orthogonal_matrix22_eigvals( example, ``M01 = M[0, 1]``. sort : {"ascending", "descending"}, optional Eigenvalues should be sorted in the specified order. - abs_sort : boolean, optional + abs_sort : bool, optional If ``True``, sort based on the absolute values. References @@ -676,7 +676,7 @@ def _image_orthogonal_matrix33_eigvals( shown above. For example, ``d = M[0, 1]``. sort : {"ascending", "descending"}, optional Eigenvalues should be sorted in the specified order. - abs_sort : boolean, optional + abs_sort : bool, optional If ``True``, sort based on the absolute values. References @@ -707,7 +707,7 @@ def _symmetric_compute_eigenvalues(S_elems, sort="descending", abs_sort=False): `hessian_matrix` or `structure_tensor`. sort : {"ascending", "descending"}, optional Eigenvalues should be sorted in the specified order. - abs_sort : boolean, optional + abs_sort : bool, optional If ``True``, sort based on the absolute values. Returns diff --git a/python/cucim/src/cucim/skimage/measure/_colocalization.py b/python/cucim/src/cucim/skimage/measure/_colocalization.py index f3847cb27..38df63e82 100644 --- a/python/cucim/src/cucim/skimage/measure/_colocalization.py +++ b/python/cucim/src/cucim/skimage/measure/_colocalization.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2023-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -100,19 +100,18 @@ def pearson_corr_coeff(image0, image1, mask=None): def manders_coloc_coeff(image0, image1_mask, mask=None): - r"""Manders' colocalization coefficient between two channels. + r"""Manders' colocalization coefficient between two image channels. Parameters ---------- image0 : (M, N) ndarray - Image of channel A. All pixel values should be non-negative. + Input image (first channel). All pixel values should be non-negative. image1_mask : (M, N) ndarray of dtype bool - Binary mask with segmented regions of interest in channel B. - Must have same dimensions as `image0`. + Binary image giving the regions of interest in the second channel. + Must have same shape as `image0`. mask : (M, N) ndarray of dtype bool, optional - Only `image0` pixel values within this region of interest mask are - included in the calculation. - Must have same dimensions as `image0`. + Only `image0` pixel values within `mask` are included in the calculation. + Must have same shape as `image0`. Returns ------- @@ -121,33 +120,35 @@ def manders_coloc_coeff(image0, image1_mask, mask=None): Notes ----- - Manders' Colocalization Coefficient (MCC) is the fraction of total - intensity of a certain channel (channel A) that is within the segmented - region of a second channel (channel B) [1]_. It ranges from 0 for no - colocalisation to 1 for complete colocalization. It is also referred to - as M1 and M2. + Manders' colocalization coefficient (MCC) was developed in the context of + confocal biological microscopy, to measure the fraction of colocalizing + objects in each component of a dual-channel image. Out of the total + intensity of, say, channel A, how much is found within the features + (objects) of, say, channel B [1]_? The measure thus ranges from 0 for no + colocalization to 1 for complete colocalization. 
MCC is commonly used to measure the colocalization of a particular protein - in a subceullar compartment. Typically a segmentation mask for channel B - is generated by setting a threshold that the pixel values must be above - to be included in the MCC calculation. In this implementation, - the channel B mask is provided as the argument `image1_mask`, allowing - the exact segmentation method to be decided by the user beforehand. + in a subcellular compartment. Typically, the mask for channel B is + obtained by thresholding, to segment the features from the background. + In this implementation, channel B is passed directly as a mask + (`image1_mask`), leaving the segmentation step to the user (upstream). The implemented equation is: .. math:: - r = \frac{\sum A_{i,coloc}}{\sum A_i} + + mcc = \frac{\sum_i A_{i,coloc}}{\sum_i A_i} where - :math:`A_i` is the value of the :math:`i^{th}` pixel in `image0` - :math:`A_{i,coloc} = A_i` if :math:`Bmask_i > 0` - :math:`Bmask_i` is the value of the :math:`i^{th}` pixel in - `mask` + + - :math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`, and + - :math:`A_{i, coloc} = A_i B_i`, considering that :math:`B_i` is the + (``True`` or ``False``) value of the :math:`i^{th}` pixel in + `image1_mask` cast into int or float (``1`` or ``0``, respectively). MCC is sensitive to noise, with diffuse signal in the first channel - inflating its value. Images should be processed to remove out of focus and - background light before the MCC is calculated [2]_. + inflating its value. Therefore, images should be processed beforehand to + remove out-of-focus and background light [2]_.
References ---------- diff --git a/python/cucim/src/cucim/skimage/measure/_moments.py b/python/cucim/src/cucim/skimage/measure/_moments.py index 55cbfae9c..95e7fb8cb 100644 --- a/python/cucim/src/cucim/skimage/measure/_moments.py +++ b/python/cucim/src/cucim/skimage/measure/_moments.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import itertools @@ -182,7 +182,7 @@ def moments(image, order=3, *, spacing=None): Rasterized shape as image. order : int, optional Maximum order of moments. Default is 3. - spacing: tuple of float, shape (ndim,) + spacing : tuple of float, shape (ndim,) The pixel spacing along each axis of the image. Returns @@ -249,7 +249,7 @@ def moments_central(image, center=None, order=3, *, spacing=None, **kwargs): is not provided. order : int, optional The maximum order of moments computed. - spacing: tuple of float, shape (ndim,) + spacing : tuple of float, shape (ndim,) The pixel spacing along each axis of the image. Returns @@ -372,7 +372,7 @@ def moments_normalized(mu, order=3, spacing=None): to ``order``. order : int, optional Maximum order of moments. Default is 3. - spacing: tuple of float, shape (ndim,) + spacing : tuple of float, shape (ndim,) The pixel spacing along each axis of the image. Returns @@ -497,7 +497,7 @@ def centroid(image, *, spacing=None): ---------- image : array The input image. - spacing: tuple of float, shape (ndim,) + spacing : tuple of float, shape (ndim,) The pixel spacing along each axis of the image. 
Returns diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops.py b/python/cucim/src/cucim/skimage/measure/_regionprops.py index 7f83a85af..2d563c7c0 100644 --- a/python/cucim/src/cucim/skimage/measure/_regionprops.py +++ b/python/cucim/src/cucim/skimage/measure/_regionprops.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import inspect @@ -25,9 +25,8 @@ # All values in this PROPS dict correspond to current scikit-image property -# names. The keys in this PROPS dict correspond to older names used in prior -# releases. For backwards compatibility, these older names will continue to -# work, but will not be documented. +# names. The keys in this PROPS dict correspond to deprecated names used in +# prior releases PROPS = { "Area": "area", "BoundingBox": "bbox", @@ -41,7 +40,6 @@ "ConvexImage": "image_convex", "convex_image": "image_convex", "Coordinates": "coords", - "coords_scaled": "coords_scaled", "Eccentricity": "eccentricity", "EquivDiameter": "equivalent_diameter_area", "equivalent_diameter": "equivalent_diameter_area", @@ -76,7 +74,6 @@ "minor_axis_length": "axis_minor_length", "Moments": "moments", "NormalizedMoments": "moments_normalized", - "num_pixels": "num_pixels", "Orientation": "orientation", "Perimeter": "perimeter", "CroftonPerimeter": "perimeter_crofton", @@ -126,6 +123,7 @@ "inertia_tensor_eigvals": float, "intensity_max": float, "intensity_mean": float, + # 'intensity_median': float, # TODO: uncomment once supported "intensity_min": float, "intensity_std": float, "label": int, @@ -153,6 +151,7 @@ "image_intensity", "intensity_max", "intensity_mean", + "intensity_median", "intensity_min", "intensity_std", "moments_weighted", @@ -165,7 +164,7 @@ def 
_infer_number_of_required_args(func): - """Infer the number of required arguments for a function + """Infer the number of required arguments for a given function Parameters ---------- @@ -175,7 +174,7 @@ def _infer_number_of_required_args(func): Returns ------- n_args : int - The number of required arguments of func. + The number of required arguments for `func`. """ argspec = inspect.getfullargspec(func) n_args = len(argspec.args) @@ -185,7 +184,7 @@ def _infer_number_of_required_args(func): def _infer_regionprop_dtype(func, *, intensity, ndim): - """Infer the dtype of a region property calculated by func. + """Infer the dtype of a region property calculated by `func`. If a region property function always returns the same shape and type of output regardless of input size, then the dtype is the dtype of the @@ -195,11 +194,11 @@ def _infer_regionprop_dtype(func, *, intensity, ndim): ---------- func : callable Function to be tested. The signature should be array[bool] -> Any if - intensity is False, or *(array[bool], array[float]) -> Any otherwise. + `intensity` is False, or *(array[bool], array[float]) -> Any otherwise. intensity : bool - Whether the regionprop is calculated on an intensity image. + Whether the regionprop is calculated using an intensity image. ndim : int - The number of dimensions for which to check func. + The number of dimensions for which to check `func`. Returns ------- @@ -263,8 +262,26 @@ def func2d(self, *args, **kwargs): class RegionProperties: - """Please refer to `skimage.measure.regionprops` for more information + """Provides properties of a labeled image region. + + Please refer to `cucim.skimage.measure.regionprops` for more information on the available region properties. + + Notes + ----- + For GPU computation of regionprops it is highly recommended to instead + use regionprops_table for efficient batch computation over all labels. + + Examples + -------- + >>> RegionProperties( + ... slice=(slice(0, 2), slice(0, 4)), + ... 
label=2, + ... label_image=np.array([[0, 1, 1, 2, 0], [2, 2, 2, 2, 0]]), + ... intensity_image=None, + ... cache_active=False, + ... ) + """ def __init__( @@ -379,12 +396,19 @@ def __getattr__(self, attr): f"Attribute '{attr}' unavailable when `intensity_image` " f"has not been specified." ) + warn( + f"`RegionProperties.{attr}` is deprecated and will be " + "removed in a future version after the scikit-image 2.0 " + f"release. Use `RegionProperties.{PROPS[attr]}` instead. ", + category=FutureWarning, + stacklevel=2, + ) + # retrieve deprecated property (excluding old CamelCase ones) return getattr(self, PROPS[attr]) - else: - raise AttributeError( - f"'{type(self)}' object has no attribute '{attr}'" - ) + + # Fallback to default behavior, potentially raising an attribute error + return self.__getattribute__(attr) def __setattr__(self, name, value): if name in PROPS: @@ -408,7 +432,7 @@ def bbox(self): Returns ------- A tuple of the bounding box's start coordinates for each dimension, - followed by the end coordinates for each dimension + followed by the end coordinates for each dimension. """ return tuple( [self.slice[i].start for i in range(self._ndim)] @@ -470,7 +494,7 @@ def equivalent_diameter_area(self): def euler_number(self): if self._ndim not in [2, 3]: raise NotImplementedError( - "Euler number is implemented for 2D or 3D images only" + "Euler number is implemented for 2D and 3D images only" ) return euler_number(self.image, self._ndim) @@ -483,7 +507,9 @@ def feret_diameter_max(self): from skimage.measure import find_contours, marching_cubes # TODO: implement marching cubes, etc. 
- warn("feret diameter_max currently not implemented on GPU.") + warn( + "feret diameter_max not currently implemented on GPU.", stacklevel=3 + ) identity_convex_hull = pad( self.image_convex, 2, mode="constant", constant_values=0 ) @@ -559,6 +585,10 @@ def intensity_max(self): def intensity_mean(self): return cp.mean(self.image_intensity[self.image], axis=0) + @property + def intensity_median(self): + return cp.median(self.image_intensity[self.image], axis=0) + @property def intensity_min(self): vals = self.image_intensity[self.image] @@ -576,7 +606,8 @@ def axis_major_length(self): return 4 * math.sqrt(l1) elif self._ndim == 3: ev = self.inertia_tensor_eigvals - return math.sqrt(10 * (ev[0] + ev[1] - ev[2])) + l2 = 10 * (ev[0] + ev[1] - ev[2]) + return math.sqrt(max(0, l2)) else: raise ValueError("axis_major_length only available in 2D and 3D") @@ -589,7 +620,8 @@ def axis_minor_length(self): ev = self.inertia_tensor_eigvals # use max to avoid possibly very small negative value due to # numeric error - return math.sqrt(10 * max(-ev[0] + ev[1] + ev[2], 0.0)) + l2 = 10 * max(-ev[0] + ev[1] + ev[2], 0.0) + return math.sqrt(max(0, l2)) else: raise ValueError("axis_minor_length only available in 2D and 3D") @@ -762,11 +794,16 @@ def __iter__(self): return iter(sorted(props)) def __getitem__(self, key): - value = getattr(self, key, None) - if value is not None: - return value - else: # backwards compatibility - return getattr(self, PROPS[key]) + if key in PROPS: + warn( + f"`RegionProperties[{key!r}]` is deprecated and will be " + "removed in a future version after the scikit-image 2.0 " + f"release. Use `RegionProperties[{PROPS[key]!r}]` instead. 
", + category=FutureWarning, + stacklevel=2, + ) + key = PROPS[key] + return getattr(self, key) def __eq__(self, other): if not isinstance(other, RegionProperties): @@ -788,6 +825,11 @@ def __eq__(self, other): return True + def __repr__(self): + cls_name = type(self).__qualname__ + out = f"<{cls_name}: label={self.label!r}, bbox={self.bbox}>" + return out + # For compatibility with code written prior to 0.16 _RegionProperties = RegionProperties @@ -959,19 +1001,30 @@ def regionprops_table( spacing=None, batch_processing=True, ): - """Compute image properties and return them as a pandas-compatible table. - - The table is a dictionary mapping column names to value arrays. See Notes - section below for details. + """Compute region properties and return them as a pandas-compatible table. + + The return value is a dictionary mapping property names to value arrays. + This dictionary can be used as input to ``pandas.DataFrame`` to result in + a "tidy" [1]_ table with one region per row and one property per column. + + Use this function typically when you want to do downstream data analysis, + or save region data to disk in a structured way. One downside of this + function is that it breaks multi-dimensional properties into independent + columns; for example, the region centroids of a 3D image end up in three + different columns, one per dimension. If you need to do complex + computations with the region properties, using + :func:`cucim.skimage.measure.regionprops_gpu.regionprops_dict` returns + a dict where multi-dimensional properties have not been split into + regions. .. versionadded:: 0.16 Parameters ---------- label_image : (M, N[, P]) ndarray - Labeled input image. Labels with value 0 are ignored. + Label image. Labels with value 0 are ignored. 
intensity_image : (M, N[, P][, C]) ndarray, optional - Intensity (i.e., input) image with same size as labeled image, plus + Intensity (input) image of same shape as labeled image, plus optionally an extra dimension for multichannel data. The channel dimension, if present, must be the last axis. Default is None. @@ -997,7 +1050,7 @@ def regionprops_table( Object columns are those that cannot be split in this way because the number of columns would change depending on the object. For example, ``image`` and ``coords``. - extra_properties : Iterable of callables + extra_properties : iterable of callables Add extra property computation functions that are not included with skimage. The name of the property is derived from the function name, the dtype is inferred by calling the function on a small sample. @@ -1006,7 +1059,7 @@ def regionprops_table( issued. A property computation function must take a region mask as its first argument. If the property requires an intensity image, it must accept the intensity image as the second argument. - spacing: tuple of float, shape (ndim,) + spacing : tuple of float, shape (ndim,) The pixel spacing along each axis of the image. Extra Parameters @@ -1046,6 +1099,12 @@ def regionprops_table( size), an object array will be used, with the corresponding property name as the key. + References + ---------- + .. [1] Wickham, H (2014) "Tidy Data" Journal of Statistical Software, + 59(10), 1–23. https://doi.org/10.18637/jss.v059.i10 + https://vita.had.co.nz/papers/tidy-data.pdf + Examples -------- >>> from skimage import data, util, measure @@ -1173,10 +1232,18 @@ def regionprops( ): r"""Measure properties of labeled image regions. + Region properties are evaluated on demand and come in diverse types. If + you want to do tabular data analysis of specific properties, consider + using :func:`skimage.measure.regionprops_table` instead. 
+ + For computation on the GPU, it is highly recommended to use + `regionprops_table` for batch processing of all region properties in an + efficient manner. + Parameters ---------- label_image : (M, N[, P]) ndarray - Labeled input image. Labels with value 0 are ignored. + Label image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and @@ -1185,7 +1252,7 @@ def regionprops( recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (M, N[, P][, C]) ndarray, optional - Intensity (i.e., input) image with same size as labeled image, plus + Intensity (input) image of same shape as labeled image, plus optionally an extra dimension for multichannel data. Currently, this extra channel dimension, if present, must be the last axis. Default is None. @@ -1196,23 +1263,23 @@ def regionprops( Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. - extra_properties : Iterable of callables + extra_properties : iterable of callables Add extra property computation functions that are not included with skimage. The name of the property is derived from the function name, - the dtype is inferred by calling the function on a small sample. + and its dtype is inferred by calling the function on a small sample. If the name of an extra property clashes with the name of an existing - property the extra property will not be visible and a UserWarning is - issued. A property computation function must take a region mask as its - first argument. If the property requires an intensity image, it must - accept the intensity image as the second argument. - spacing: tuple of float, shape (ndim,) + property the extra property will not be visible and a UserWarning will + be issued. A property computation function must take `label_image` as + its first argument. 
If the property requires an intensity image, it + must accept `intensity_image` as the second argument. + spacing : tuple of float, shape (ndim,) The pixel spacing along each axis of the image. Returns ------- properties : list of RegionProperties - Each item describes one labeled region, and can be accessed using the - attributes listed below. + Each item of the list corresponds to one labeled image region, + and can be accessed using the attributes listed below. Notes ----- @@ -1221,9 +1288,11 @@ def regionprops( **num_pixels** : int Number of foreground pixels. **area** : float - Area of the region i.e. number of pixels of the region scaled by pixel-area. + Area of the region, i.e., number of pixels of the region scaled by + pixel area (as determined by `spacing`). **area_bbox** : float - Area of the bounding box i.e. number of pixels of bounding box scaled by pixel-area. + Area of the bounding box, i.e., number of pixels of the region's + bounding box scaled by pixel area (as determined by `spacing`). **area_convex** : float Are of the convex hull image, which is the smallest convex polygon that encloses the region. @@ -1251,11 +1320,11 @@ def regionprops( Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **coords_scaled** : (K, 2) ndarray - Coordinate list ``(row, col)`` of the region scaled by ``spacing``. + Coordinate list ``(row, col)`` of the region scaled by `spacing`. **coords** : (K, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float - Eccentricity of the ellipse that has the same second-moments as the + Eccentricity of the ellipse that has the same second moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). @@ -1269,47 +1338,49 @@ def regionprops( components plus number of holes subtracted by number of tunnels. 
**extent** : float Ratio of pixels in the region to pixels in the total bounding box. - Computed as ``area / (rows * cols)`` + Computed as ``area / (rows * cols)``. **feret_diameter_max** : float Maximum Feret's diameter computed as the longest distance between points around a region's convex hull contour as determined by ``find_contours``. [5]_ **image** : (H, J) ndarray - Sliced binary region image which has the same size as bounding box. + Binary region image sliced by the bounding box. **image_convex** : (H, J) ndarray - Binary convex hull image which has the same size as bounding box. + Binary convex hull image sliced by bounding box. **image_filled** : (H, J) ndarray - Binary region image with filled holes which has the same size as - bounding box. - **image_intensity** : ndarray - Image inside region bounding box. + Binary region image with filled holes sliced by bounding box. + **image_intensity** : (H, J) ndarray + Intensity image sliced by bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_max** : float - Value with the greatest intensity in the region. + Value of greatest intensity in the region. **intensity_mean** : float - Value with the mean intensity in the region. + Average of intensity values in the region. + **intensity_median** : float + Value of median intensity in the region. **intensity_min** : float - Value with the least intensity in the region. + Value of lowest intensity in the region. **intensity_std** : float - Standard deviation of the intensity in the region. + Standard deviation of intensity values in the region. **label** : int - The label in the labeled input image. + The region's label in the input label image. 
**moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } - where the sum is over the `row`, `col` coordinates of the region. + where the sum is over the ``row, col`` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } - where the sum is over the `row`, `col` coordinates of the region, - and `row_c` and `col_c` are the coordinates of the region's centroid. + where the sum is over the ``row, col`` coordinates of the region, + and ``row_c`` and ``col_c`` are the coordinates of the region's + centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray @@ -1317,20 +1388,20 @@ def regionprops( nu_ij = mu_ij / m_00^[(i+j)/2 + 1] - where `m_00` is the zeroth spatial moment. + where ``m_00`` is the zeroth spatial moment. **moments_weighted** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } - where the sum is over the `row`, `col` coordinates of the region. + where the sum is over the ``row, col`` coordinates of the region. **moments_weighted_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } - where the sum is over the `row`, `col` coordinates of the region, + where the sum is over the ``row, col`` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **moments_weighted_hu** : tuple @@ -1342,26 +1413,26 @@ def regionprops( wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] - where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). + where ``wm_00`` is the zero-th spatial moment (intensity-weighted area). 
**orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, - ranging from `-pi/2` to `pi/2` counter-clockwise. + ranging from :math:`-\pi/2` to :math:`\pi/2` counter-clockwise. **perimeter** : float - Perimeter of object which approximates the contour as a line + Perimeter of the region which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **perimeter_crofton** : float - Perimeter of object approximated by the Crofton formula in 4 + Perimeter of the region approximated by the Crofton formula in 4 directions. **slice** : tuple of slices - A slice to extract the object from the source image. + A slice to extract the region from the input image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. - Each region also supports iteration, so that you can do:: + `properties` also supports iteration, so that you can do:: - for prop in region: - print(prop, region[prop]) + for region in properties: + print(region.label, region.area) See Also -------- @@ -1395,7 +1466,7 @@ def regionprops( >>> props[0]['centroid'] (22.72987986048314, 81.91228523446583) - Add custom measurements by passing functions as ``extra_properties`` + Add custom measurements by passing functions as ``extra_properties``: >>> from skimage import data, util >>> from cucim.skimage.measure import label, regionprops @@ -1405,8 +1476,10 @@ >>> def pixelcount(regionmask): ...
return np.sum(regionmask) >>> props = regionprops(label_img, extra_properties=(pixelcount,)) + >>> # pixelcount of first labeled region >>> props[0].pixelcount array(7741) + >>> # pixelcount of second labeled region >>> props[1]['pixelcount'] array(42) diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops_gpu.py b/python/cucim/src/cucim/skimage/measure/_regionprops_gpu.py index ba5189307..775eb4c24 100644 --- a/python/cucim/src/cucim/skimage/measure/_regionprops_gpu.py +++ b/python/cucim/src/cucim/skimage/measure/_regionprops_gpu.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 import warnings @@ -116,11 +116,19 @@ "equivalent_spherical_perimeter": "equivalent_spherical_perimeter", } PROPS_GPU.update(PROPS_GPU_EXTRA) +# properties recently removed from PROPS dict (but still supported) +PROPS_REMOVED = { + "coords_scaled": "coords_scaled", + "num_pixels": "num_pixels", +} +PROPS_GPU.update(PROPS_REMOVED) + CURRENT_PROPS_GPU = set(PROPS_GPU.values()) COL_DTYPES_EXTRA = { "axis_lengths": float, + "coords_scaled": object, "inertia_tensor_eigenvectors": float, "num_pixels_filled": int, "num_perimeter_pixels": int, @@ -308,6 +316,13 @@ def regionprops_dict( invalid_names = set(properties) - valid_names valid_names = list(valid_names) + # TODO(grelee): implement batch kernel for efficient intensity median + # computation. + if "intensity_median" in properties: + raise NotImplementedError( + "Batch computation of 'intensity_median' is not yet supported" + ) + # Use only the modern names internally, but keep list of mappings back to # any deprecated names in restore_legacy_names and use that at the end to # restore the requested deprecated property names. 
diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops_gpu_convex.py b/python/cucim/src/cucim/skimage/measure/_regionprops_gpu_convex.py index c4e622cbd..d26a42b46 100644 --- a/python/cucim/src/cucim/skimage/measure/_regionprops_gpu_convex.py +++ b/python/cucim/src/cucim/skimage/measure/_regionprops_gpu_convex.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 import math @@ -82,7 +82,9 @@ def _feret_diameter_max(image_convex, spacing=None, return_argmax=False): """Compute the maximum Feret diameter of a single convex image region.""" if image_convex.size == 1: warnings.warn( - "single element image, returning 0 for feret diameter", UserWarning + "single element image, returning 0 for feret diameter", + UserWarning, + stacklevel=2, ) return 0 coords = _regionprops_coords_perimeter(image_convex, connectivity=1) diff --git a/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py b/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py index 05ced0656..b75474b98 100644 --- a/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py +++ b/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import math @@ -75,7 +75,7 @@ def euler_number(image, connectivity=None): Parameters ---------- - image: (M, N[, P]) ndarray + image : (M, N[, P]) ndarray Input image. If image is not binary, all values greater than zero are considered as the object. 
connectivity : int, optional diff --git a/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py b/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py index fbe11b6f2..29062e528 100644 --- a/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py +++ b/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import math @@ -99,6 +99,9 @@ def get_central_moment_function(img, spacing=(1, 1)): return lambda p, q: cp.sum((Y - cY) ** p * (X - cX) ** q * img) +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) def test_all_props(): region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0] for prop in PROPS: @@ -120,6 +123,9 @@ def test_all_props(): pass +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) def test_all_props_3d(): region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0] for prop in PROPS: @@ -642,6 +648,13 @@ def test_intensity_mean(): assert_almost_equal(intensity, 1.02777777777777) +def test_intensity_median(): + intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[ + 0 + ].intensity_median + assert_almost_equal(intensity, 1.0) + + def test_intensity_min(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[ 0 @@ -673,6 +686,28 @@ def test_axis_minor_length(): length = regionprops(img[::2], spacing=(2, 1))[0].axis_minor_length assert_almost_equal(length, target_length, decimal=1) + # this input can produce small value that can be negative due to numerical errors + img_negative_length = cp.array( + [ + [ + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 0], + [0, 
0, 1, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], + ] + ], + dtype=np.uint8, + ) + + length = regionprops(img_negative_length)[0].axis_minor_length + target_length = 0.0 + assert_almost_equal(length, target_length) + def test_moments(): m = regionprops(SAMPLE)[0].moments @@ -1059,6 +1094,9 @@ def test_equals(): assert_equal(r1 != r3, True, "Different regionprops are equal") +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) def test_iterate_all_props(): region = regionprops(SAMPLE)[0] p0 = {p: region[p] for p in region} @@ -1213,6 +1251,9 @@ def test_regionprops_table_deprecated_scalar_property(batch_processing): assert list(out.keys()) == ["bbox_area"] +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) def test_regionprops_table_equal_to_original_no_batch_processing(): regions = regionprops(SAMPLE, INTENSITY_FLOAT_SAMPLE) out_table = regionprops_table( @@ -1240,6 +1281,9 @@ def test_regionprops_table_equal_to_original_no_batch_processing(): assert_array_equal(rp[loc], out_table[modified_prop][i]) +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) def test_regionprops_table_batch_close_to_original(): regions = regionprops(SAMPLE, INTENSITY_FLOAT_SAMPLE) out_table = regionprops_table( @@ -1250,7 +1294,6 @@ def test_regionprops_table_batch_close_to_original(): ) for prop, dtype in COL_DTYPES.items(): - print(f"{prop=}") for i, reg in enumerate(regions): rp = reg[prop] if ( @@ -1314,10 +1357,9 @@ def test_regionprops_table_error_on_numpy_input(): ) -def test_column_dtypes_complete(): - assert set(COL_DTYPES.keys()).union(OBJECT_COLUMNS) == set(PROPS.values()) - - +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) def test_column_dtypes_correct(): msg = "mismatch 
with expected type," region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0] @@ -1363,6 +1405,10 @@ def test_all_documented_items_in_col_dtypes(): re.search(pattern, property_line).group("property_name") for property_line in property_lines } + + # TODO(grelee): remove next line once `intensity_median` is fully enabled + property_names.remove("intensity_median") + column_keys = set(COL_DTYPES.keys()) assert column_keys == property_names @@ -1372,7 +1418,7 @@ def pixelcount(regionmask): return cp.sum(regionmask) -def intensity_median(regionmask, image_intensity): +def custom_intensity_median(regionmask, image_intensity): return cp.median(image_intensity[regionmask]) @@ -1398,9 +1444,11 @@ def test_extra_properties_intensity(): region = regionprops( SAMPLE, intensity_image=INTENSITY_SAMPLE, - extra_properties=(intensity_median,), + extra_properties=(custom_intensity_median,), )[0] - assert region.intensity_median == cp.median(INTENSITY_SAMPLE[SAMPLE == 1]) + assert region.custom_intensity_median == cp.median( + INTENSITY_SAMPLE[SAMPLE == 1] + ) def test_extra_properties_intensity_multichannel(): @@ -1408,10 +1456,10 @@ def test_extra_properties_intensity_multichannel(): region = regionprops( SAMPLE, intensity_image=cp.stack((INTENSITY_SAMPLE,) * n_channels, axis=-1), - extra_properties=(intensity_median,), + extra_properties=(custom_intensity_median,), )[0] for c in range(n_channels): - assert region.intensity_median[c] == cp.median( + assert region.custom_intensity_median[c] == cp.median( INTENSITY_SAMPLE[SAMPLE == 1] ) @@ -1430,8 +1478,10 @@ def test_intensity_image_required(intensity_prop): def test_extra_properties_no_intensity_provided(): with pytest.raises(AttributeError): - region = regionprops(SAMPLE, extra_properties=(intensity_median,))[0] - _ = region.intensity_median + region = regionprops( + SAMPLE, extra_properties=(custom_intensity_median,) + )[0] + _ = region.custom_intensity_median def test_extra_properties_nr_args(): @@ -1448,9 +1498,11 @@ 
def test_extra_properties_mixed(): region = regionprops( SAMPLE, intensity_image=INTENSITY_SAMPLE, - extra_properties=(intensity_median, pixelcount), + extra_properties=(custom_intensity_median, pixelcount), )[0] - assert region.intensity_median == cp.median(INTENSITY_SAMPLE[SAMPLE == 1]) + assert region.custom_intensity_median == cp.median( + INTENSITY_SAMPLE[SAMPLE == 1] + ) assert region.pixelcount == cp.sum(SAMPLE == 1) @@ -1460,10 +1512,12 @@ def test_extra_properties_table(batch_processing): SAMPLE_MULTIPLE, intensity_image=INTENSITY_SAMPLE_MULTIPLE, properties=("label",), - extra_properties=(intensity_median, pixelcount, bbox_list), + extra_properties=(custom_intensity_median, pixelcount, bbox_list), batch_processing=batch_processing, ) - assert_array_almost_equal(out["intensity_median"], np.array([2.0, 4.0])) + assert_array_almost_equal( + out["custom_intensity_median"], np.array([2.0, 4.0]) + ) assert_array_equal(out["pixelcount"], np.array([10, 2])) assert out["bbox_list"].dtype == np.object_ @@ -1471,7 +1525,13 @@ def test_extra_properties_table(batch_processing): assert out["bbox_list"][1] == [1] * 1 -def test_multichannel(): +@pytest.mark.filterwarnings( + "ignore:`RegionProperties.* is deprecated:FutureWarning" +) +@pytest.mark.parametrize( + "prop_name", [*PROPS.keys(), "custom_intensity_median"] +) +def test_multichannel(prop_name): """Test that computing multichannel properties works.""" astro = data.astronaut()[::4, ::4] labels = slic(astro.astype(float), start_label=1) @@ -1484,30 +1544,29 @@ def test_multichannel(): region = regionprops( labels, astro_green, - extra_properties=[intensity_median], + extra_properties=[custom_intensity_median], )[segment_idx] region_multi = regionprops( labels, astro, - extra_properties=[intensity_median], + extra_properties=[custom_intensity_median], )[segment_idx] - for prop in list(PROPS.keys()) + ["intensity_median"]: - p = region[prop] - p_multi = region_multi[prop] - if isinstance(p, (list, tuple)): - p = 
tuple([cp.asnumpy(p_) for p_ in p]) - p = np.stack(p) - if isinstance(p_multi, (list, tuple)): - p_multi = tuple([cp.asnumpy(p_) for p_ in p_multi]) - p_multi = np.stack(p_multi) - if np.shape(p) == np.shape(p_multi): - # property does not depend on multiple channels - assert_array_equal(p, p_multi) - else: - # property uses multiple channels, returns props stacked along - # final axis - assert_array_equal(p, p_multi[..., 1]) + p = region[prop_name] + p_multi = region_multi[prop_name] + if isinstance(p, (list, tuple)): + p = tuple([cp.asnumpy(p_) for p_ in p]) + p = np.stack(p) + if isinstance(p_multi, (list, tuple)): + p_multi = tuple([cp.asnumpy(p_) for p_ in p_multi]) + p_multi = np.stack(p_multi) + if np.shape(p) == np.shape(p_multi): + # property does not depend on multiple channels + assert_array_equal(p, p_multi) + else: + # property uses multiple channels, returns props stacked along + # final axis + assert_array_equal(p, p_multi[..., 1]) def _inertia_eigvals_to_axes_lengths_3D(inertia_tensor_eigvals): @@ -1587,3 +1646,45 @@ def test_3d_ellipsoid_axis_lengths(): # verify that the axis length regionprops also agree assert abs(rp.axis_major_length - axis_lengths[0]) < 1e-7 assert abs(rp.axis_minor_length - axis_lengths[-1]) < 1e-7 + + +@pytest.mark.parametrize("old_name", list(PROPS)) +@pytest.mark.filterwarnings( + "ignore:feret diameter_max not currently implemented" +) +@pytest.mark.filterwarnings( + "ignore:.*Falling back to SciPy-based CPU implementation.:UserWarning" +) +def test_deprecated_properties(old_name): + regions = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE) + + regex = rf"`RegionProperties\['{old_name}'\]` is deprecated" + with pytest.warns(FutureWarning, match=regex): + result = regions[0][old_name] + + current_name = PROPS[old_name] + if "centroid" in current_name: + for expected_c, c in zip(result, regions[0][current_name]): + cp.testing.assert_array_equal(expected_c, c) + else: + cp.testing.assert_array_equal(result, 
regions[0][current_name]) + + if old_name == old_name.lower(): + # Lower-case properties in PROPS – such as "bbox_area" or + # "convex_image" – were accessible as attributes in addition to being + # available via `__getitem__`. + # Make sure those emit an appropriate deprecation warning too + regex = f"`RegionProperties.{old_name}` is deprecated." + with pytest.warns(FutureWarning, match=regex): + result = getattr(regions[0], old_name) + current_name = PROPS[old_name] + if "centroid" in current_name: + for expected_c, c in zip(result, regions[0][current_name]): + cp.testing.assert_array_equal(expected_c, c) + else: + cp.testing.assert_array_equal(result, regions[0][current_name]) + + else: + regex = f"'RegionProperties' object has no attribute {old_name!r}" + with pytest.raises(AttributeError, match=regex): + getattr(regions[0], old_name) diff --git a/python/cucim/src/cucim/skimage/metrics/simple_metrics.py b/python/cucim/src/cucim/skimage/metrics/simple_metrics.py index ba3c47b2a..4a6be372f 100644 --- a/python/cucim/src/cucim/skimage/metrics/simple_metrics.py +++ b/python/cucim/src/cucim/skimage/metrics/simple_metrics.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -212,7 +212,9 @@ def normalized_mutual_information(image0, image1, *, bins=100): Y(A, B) = \frac{H(A) + H(B)}{H(A, B)} - where :math:`H(X) := - \sum_{x \in X}{x \log x}` is the entropy. + where :math:`H(X) := - \sum_{x \in X}{p(x) \log p(x)}` is the entropy, + :math:`X` is the set of image values, and :math:`p(x)` is the probability + of occurrence of value :math:`x \in X`. It was proposed to be useful in registering images by Colin Studholme and colleagues [1]_. 
It ranges from 1 (perfectly uncorrelated image values) diff --git a/python/cucim/src/cucim/skimage/morphology/binary.py b/python/cucim/src/cucim/skimage/morphology/binary.py index 5f2733c14..d48a1a182 100644 --- a/python/cucim/src/cucim/skimage/morphology/binary.py +++ b/python/cucim/src/cucim/skimage/morphology/binary.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """ @@ -48,11 +48,30 @@ def _iterate_binary_func(binary_func, image, footprint, out, border_value): return out +# Note: scikit-image 0.26 deprecated the binary_* functions. +# +# For cuCIM, we need to consider whether or not to do the same. For +# scikit-image it seems grayscale morphology is equal or faster to binary, but +# for cuCIM performance is better with these binary variants. + + # The default_footprint decorator provides a diamond footprint as # default with the same dimension as the input image and size 3 along each # axis. +# @deprecate_func( +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# deprecated_version="26.04", +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# removed_version="26.12", +# hint="Use `skimage.morphology.erosion` instead. " +# "Note the pixel shift by 1 for even-sized footprints (see docstring notes).", +# ) @default_footprint def binary_erosion(image, footprint=None, out=None, *, mode="ignore"): + return _binary_erosion(image, footprint=footprint, out=out, mode=mode) + + +def _binary_erosion(image, footprint=None, out=None, *, mode="ignore"): """Return fast binary morphological erosion of an image. 
This function returns the same result as grayscale erosion but performs @@ -104,7 +123,12 @@ def binary_erosion(image, footprint=None, out=None, *, mode="ignore"): For even-sized footprints, :func:`skimage.morphology.erosion` and this function produce an output that differs: one is shifted by one pixel - compared to the other. + compared to the other. :func:`cucim.skimage.morphology.pad_footprint` is + available to account for this. + + See Also + -------- + cucim.skimage.morphology.isotropic_erosion """ if out is None: out = cp.empty(image.shape, dtype=bool) @@ -128,8 +152,21 @@ def binary_erosion(image, footprint=None, out=None, *, mode="ignore"): return out +# @deprecate_func( +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# deprecated_version="26.04", +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# removed_version="26.12", +# hint="Use `skimage.morphology.dilation` instead. " +# "Note the lack of mirroring for non-symmetric footprints (see docstring " +# "notes).", +# ) @default_footprint def binary_dilation(image, footprint=None, out=None, *, mode="ignore"): + return _binary_dilation(image, footprint=footprint, out=out, mode=mode) + + +def _binary_dilation(image, footprint=None, out=None, *, mode="ignore"): """Return fast binary morphological dilation of an image. This function returns the same result as grayscale dilation but performs @@ -182,6 +219,12 @@ def binary_dilation(image, footprint=None, out=None, *, mode="ignore"): For non-symmetric footprints, :func:`skimage.morphology.binary_dilation` and :func:`skimage.morphology.dilation` produce an output that differs: `binary_dilation` mirrors the footprint, whereas `dilation` does not.
+ + See Also + -------- + cucim.skimage.morphology.isotropic_dilation """ if out is None: out = cp.empty(image.shape, dtype=bool) @@ -205,8 +248,19 @@ def binary_dilation(image, footprint=None, out=None, *, mode="ignore"): return out +# @deprecate_func( +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# deprecated_version="26.04", +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# removed_version="26.12", +# hint="Use `skimage.morphology.opening` instead. " +# ) @default_footprint def binary_opening(image, footprint=None, out=None, *, mode="ignore"): + return _binary_opening(image, footprint=footprint, out=out, mode=mode) + + +def _binary_opening(image, footprint=None, out=None, *, mode="ignore"): """Return fast binary morphological opening of an image. This function returns the same result as grayscale opening but performs @@ -255,14 +309,29 @@ def binary_opening(image, footprint=None, out=None, *, mode="ignore"): computational cost. Most of the builtin footprints such as :func:`skimage.morphology.disk` provide an option to automatically generate a footprint sequence of this type. + + See Also + -------- + cucim.skimage.morphology.isotropic_opening """ - tmp = binary_erosion(image, footprint, mode=mode) - out = binary_dilation(tmp, footprint, out=out, mode=mode) + tmp = _binary_erosion(image, footprint, mode=mode) + out = _binary_dilation(tmp, footprint, out=out, mode=mode) return out +# @deprecate_func( +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# deprecated_version="26.04", +# # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] +# removed_version="26.12", +# hint="Use `skimage.morphology.closing` instead. 
" +# ) @default_footprint def binary_closing(image, footprint=None, out=None, *, mode="ignore"): + return _binary_closing(image, footprint=footprint, out=out, mode=mode) + + +def _binary_closing(image, footprint=None, out=None, *, mode="ignore"): """Return fast binary morphological closing of an image. This function returns the same result as grayscale closing but performs @@ -311,7 +380,11 @@ def binary_closing(image, footprint=None, out=None, *, mode="ignore"): computational cost. Most of the builtin footprints such as :func:`skimage.morphology.disk` provide an option to automatically generate a footprint sequence of this type. + + See Also + -------- + cucim.skimage.morphology.isotropic_closing """ - tmp = binary_dilation(image, footprint, mode=mode) - out = binary_erosion(tmp, footprint, out=out, mode=mode) + tmp = _binary_dilation(image, footprint, mode=mode) + out = _binary_erosion(tmp, footprint, out=out, mode=mode) return out diff --git a/python/cucim/src/cucim/skimage/morphology/convex_hull.py b/python/cucim/src/cucim/skimage/morphology/convex_hull.py index 210b2aff4..b35bed0a0 100644 --- a/python/cucim/src/cucim/skimage/morphology/convex_hull.py +++ b/python/cucim/src/cucim/skimage/morphology/convex_hull.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause from itertools import product @@ -162,7 +162,7 @@ def convex_hull_image( Tolerance when determining whether a point is inside the hull. Due to numerical floating point errors, a tolerance of 0 can result in some points erroneously being classified as being outside the hull. - include_borders: bool, optional + include_borders : bool, optional If ``False``, vertices/edges are excluded from the final hull mask. 
Note that the ``True`` case is not currently implemented in cuCIM and will raise a ``NotImplementedError``. diff --git a/python/cucim/src/cucim/skimage/morphology/footprints.py b/python/cucim/src/cucim/skimage/morphology/footprints.py index 0a6fa750f..f46879e54 100644 --- a/python/cucim/src/cucim/skimage/morphology/footprints.py +++ b/python/cucim/src/cucim/skimage/morphology/footprints.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import os @@ -979,7 +979,7 @@ def pad_footprint(footprint, *, pad_end=True): if all(s % 2 for s in footprint): # can use tuple directly if all dimensions have odd length return footprint - # Use a padded array instead of the tuple of any sizes were even + # Use a padded array instead of the tuple if any sizes were even padded_shape = tuple(s if s % 2 else s + 1 for s in footprint) padded = np.zeros(padded_shape, dtype=cp.uint8) if pad_end: diff --git a/python/cucim/src/cucim/skimage/morphology/gray.py b/python/cucim/src/cucim/skimage/morphology/gray.py index cd2f15242..f35aacbbd 100644 --- a/python/cucim/src/cucim/skimage/morphology/gray.py +++ b/python/cucim/src/cucim/skimage/morphology/gray.py @@ -1,18 +1,15 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """ Grayscale morphological operations """ -import warnings - import cupy as cp import cucim.skimage._vendored.ndimage as ndi -from .._shared.utils import DEPRECATED from .footprints import ( _footprint_is_sequence, mirror_footprint, @@ -63,73 +60,6 @@ def _iterate_gray_func(gray_func, image, footprints, out, mode, cval): return out -def _shift_footprint(footprint, shift_x, shift_y): - """Shift the binary image `footprint` in the left and/or up. - - This only affects 2D footprints with even number of rows - or columns. - - Parameters - ---------- - footprint : 2D array, shape (M, N) - The input footprint. - shift_x, shift_y : bool - Whether to move `footprint` along each axis. - - Returns - ------- - out : 2D array, shape (M + int(shift_x), N + int(shift_y)) - The shifted footprint. - """ - if isinstance(footprint, tuple): - if len(footprint) == 2 and any(s % 2 == 0 for s in footprint): - # have to use an explicit array to shift the footprint below - footprint = cp.ones(footprint, dtype=bool) - else: - # no shift needed - return footprint - if footprint.ndim != 2: - # do nothing for 1D or 3D or higher footprints - return footprint - m, n = footprint.shape - if m % 2 == 0: - extra_row = cp.zeros((1, n), footprint.dtype) - if shift_x: - footprint = cp.vstack((footprint, extra_row)) - else: - footprint = cp.vstack((extra_row, footprint)) - m += 1 - if n % 2 == 0: - extra_col = cp.zeros((m, 1), footprint.dtype) - if shift_y: - footprint = cp.hstack((footprint, extra_col)) - else: - footprint = cp.hstack((extra_col, footprint)) - return footprint - - -def _shift_footprints(footprint, shift_x, shift_y): - """Shifts the footprints, whether it's a single array or a sequence. - - See `_shift_footprint`, which is called for each array in the sequence. 
- """ - if shift_x is DEPRECATED and shift_y is DEPRECATED: - return footprint - - warning_msg = ( - "The parameters `shift_x` and `shift_y` are deprecated since v0.23 and " - "will be removed in v0.26. Use `pad_footprint` or modify the footprint" - "manually instead." - ) - warnings.warn(warning_msg, FutureWarning, stacklevel=4) - - if _footprint_is_sequence(footprint): - return tuple( - (_shift_footprint(fp, shift_x, shift_y), n) for fp, n in footprint - ) - return _shift_footprint(footprint, shift_x, shift_y) - - def _min_max_to_constant_mode(dtype, mode, cval): """Replace 'max' and 'min' with appropriate 'cval' and 'constant' mode.""" if mode == "max": @@ -168,8 +98,6 @@ def erosion( image, footprint=None, out=None, - shift_x=DEPRECATED, - shift_y=DEPRECATED, *, mode="reflect", cval=0.0, @@ -206,12 +134,6 @@ def erosion( .. versionadded:: 24.06 `mode` and `cval` were added in 24.06. - Other Parameters - ---------------- - shift_x, shift_y : DEPRECATED - - .. deprecated:: 24.06 - Returns ------- eroded : cupy.ndarray, same shape as `image` @@ -235,7 +157,8 @@ def erosion( For even-sized footprints, :func:`skimage.morphology.binary_erosion` and this function produce an output that differs: one is shifted by one pixel - compared to the other. + compared to the other. :func:`cucim.skimage.morphology.pad_footprint` is + available to account for this. 
Examples -------- @@ -265,19 +188,6 @@ def erosion( mode, cval = _min_max_to_constant_mode(image.dtype, mode, cval) is_footprint_sequence = _footprint_is_sequence(footprint) - footprint = _shift_footprints(footprint, shift_x, shift_y) - - # if isinstance(footprint, tuple) and not is_footprint_sequence: - # if len(footprint) != image.ndim: - # raise ValueError( - # "footprint.ndim={len(footprint)}, image.ndim={image.ndim}" - # ) - # if image.ndim == 2 and any(s % 2 == 0 for s in footprint): - # # only odd-shaped footprints are properly handled for tuples - # footprint = cp.ones(footprint, dtype=bool) - # else: - # ndi.grey_erosion(image, size=footprint, output=out) - # return out footprint = pad_footprint(footprint, pad_end=False) @@ -300,8 +210,6 @@ def dilation( image, footprint=None, out=None, - shift_x=DEPRECATED, - shift_y=DEPRECATED, *, mode="reflect", cval=0.0, @@ -339,12 +247,6 @@ def dilation( .. versionadded:: 24.06 `mode` and `cval` were added in 24.06. - Other Parameters - ---------------- - shift_x, shift_y : DEPRECATED - - .. deprecated:: 24.06 - Returns ------- dilated : cupy.ndarray, same shape and type as `image` @@ -369,6 +271,8 @@ def dilation( For non-symmetric footprints, :func:`skimage.morphology.binary_dilation` and :func:`skimage.morphology.dilation` produce an output that differs: `binary_dilation` mirrors the footprint, whereas `dilation` does not. + :func:`cucim.skimage.morphology.mirror_footprint` is available to correct + for this. 
Examples -------- @@ -398,19 +302,6 @@ def dilation( mode, cval = _min_max_to_constant_mode(image.dtype, mode, cval) is_footprint_sequence = _footprint_is_sequence(footprint) - footprint = _shift_footprints(footprint, shift_x, shift_y) - - # if isinstance(footprint, tuple) and not is_footprint_sequence: - # if len(footprint) != image.ndim: - # raise ValueError( - # "footprint.ndim={len(footprint)}, image.ndim={image.ndim}" - # ) - # if image.ndim == 2 and any(s % 2 == 0 for s in footprint): - # # only odd-shaped footprints are properly handled for tuples - # footprint = cp.ones(footprint, dtype=bool) - # else: - # ndi.grey_dilation(image, size=footprint, output=out) - # return out footprint = pad_footprint(footprint, pad_end=False) # Note that `ndi.grey_dilation` mirrors the footprint and this diff --git a/python/cucim/src/cucim/skimage/morphology/isotropic.py b/python/cucim/src/cucim/skimage/morphology/isotropic.py index 22c53d76b..e945d6def 100644 --- a/python/cucim/src/cucim/skimage/morphology/isotropic.py +++ b/python/cucim/src/cucim/skimage/morphology/isotropic.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """ @@ -44,11 +44,12 @@ def _check_output(out, shape): def isotropic_erosion(image, radius, out=None, spacing=None): """Return binary morphological erosion of an image. - This function returns the same result as - :func:`skimage.morphology.binary_erosion` but performs faster for large - circular structuring elements. This works by applying a threshold to the - exact Euclidean distance map of the image [1]_, [2]_. The implementation is - based on: + Compared to the more general :func:`skimage.morphology.erosion`, this + function only supports binary inputs and circular footprints. 
+ However, it performs typically faster for large (circular) footprints. + This works by applying a threshold to the exact Euclidean distance map of + the image [1]_, [2]_. + The implementation is based on: func:`cucim.core.operations.morphology.distance_transform_edt`. Parameters @@ -56,7 +57,7 @@ def isotropic_erosion(image, radius, out=None, spacing=None): image : ndarray Binary input image. radius : float - The radius by which regions should be eroded. + The radius of the footprint used for the operation. out : ndarray of bool, optional The array to store the result of the morphology. If None, a new array will be allocated. @@ -94,6 +95,26 @@ def isotropic_erosion(image, radius, out=None, spacing=None): and thresholding of distance maps, Pattern Recognition Letters, Volume 13, Issue 3, 1992, Pages 161-166. :DOI:`10.1016/0167-8655(92)90055-5` + + Examples + -------- + Erosion shrinks bright regions + + >>> import cupy as cp + >>> import cucim.skimage as ski + >>> image = cp.array([[0, 0, 1, 0, 0], + ... [0, 1, 1, 1, 0], + ... [0, 1, 1, 1, 0], + ... [0, 1, 1, 1, 0], + ... [0, 0, 0, 0, 0]], dtype=bool) + >>> result = ski.morphology.isotropic_erosion(image, radius=1) + >>> result.view(cp.uint8) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], dtype=uint8) + """ out = _check_output(out, image.shape) dist = distance_transform_edt(image, sampling=spacing) @@ -107,11 +128,12 @@ def isotropic_erosion(image, radius, out=None, spacing=None): def isotropic_dilation(image, radius, out=None, spacing=None): """Return binary morphological dilation of an image. - This function returns the same result as - :func:`skimage.morphology.binary_dilation` but performs faster for large - circular structuring elements. This works by applying a threshold to the - exact Euclidean distance map of the inverted image [1]_, [2]_. 
The - implementation is based on: + Compared to the more general :func:`cucim.skimage.morphology.dilation`, + this function only supports binary inputs and circular footprints. + However, it performs typically faster for large (circular) footprints. + This works by applying a threshold to the exact Euclidean distance map of + the inverted image [1]_, [2]_. + The implementation is based on: func:`cucim.core.operations.morphology.distance_transform_edt`. Parameters @@ -119,7 +141,7 @@ def isotropic_dilation(image, radius, out=None, spacing=None): image : ndarray Binary input image. radius : float - The radius by which regions should be dilated. + The radius of the footprint used for the operation. out : ndarray of bool, optional The array to store the result of the morphology. If None is passed, a new array will be allocated. @@ -158,6 +180,26 @@ def isotropic_dilation(image, radius, out=None, spacing=None): and thresholding of distance maps, Pattern Recognition Letters, Volume 13, Issue 3, 1992, Pages 161-166. :DOI:`10.1016/0167-8655(92)90055-5` + + Examples + -------- + Dilation enlarges bright regions + + >>> import cupy as cp + >>> import cucim.skimage as ski + >>> image = cp.array([[0, 0, 0, 0, 0], + ... [0, 0, 0, 0, 0], + ... [0, 0, 1, 0, 0], + ... [0, 0, 1, 1, 0], + ... [0, 0, 0, 0, 0]], dtype=bool) + >>> result = ski.morphology.isotropic_dilation(image, radius=1) + >>> result.view(cp.uint8) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 1], + [0, 0, 1, 1, 0]], dtype=uint8) + """ out = _check_output(out, image.shape) dist = distance_transform_edt(cp.logical_not(image), sampling=spacing) @@ -170,11 +212,11 @@ def isotropic_dilation(image, radius, out=None, spacing=None): def isotropic_opening(image, radius, out=None, spacing=None): """Return binary morphological opening of an image. 
- - This function returns the same result as - :func:`skimage.morphology.binary_opening` but performs faster for large - circular structuring elements. This works by thresholding the exact - Euclidean distance map [1]_, [2]_. The implementation is based on: + Compared to the more general :func:`cucim.skimage.morphology.opening`, this + function only supports binary inputs and circular footprints. + However, it performs typically faster for large (circular) footprints. + This works by thresholding the exact Euclidean distance map [1]_, [2]_. + The implementation is based on: func:`cucim.core.operations.morphology.distance_transform_edt`. Parameters @@ -182,7 +224,7 @@ def isotropic_opening(image, radius, out=None, spacing=None): image : ndarray Binary input image. radius : float - The radius with which the regions should be opened. + The radius of the footprint used for the operation. out : ndarray of bool, optional The array to store the result of the morphology. If None is passed, a new array will be allocated. @@ -220,6 +262,26 @@ def isotropic_opening(image, radius, out=None, spacing=None): and thresholding of distance maps, Pattern Recognition Letters, Volume 13, Issue 3, 1992, Pages 161-166. :DOI:`10.1016/0167-8655(92)90055-5` + + Examples + -------- + Remove connection between two bright regions + + >>> import cupy as cp + >>> import cucim.skimage as ski + >>> image = cp.array([[1, 0, 0, 0, 1], + ... [1, 1, 0, 1, 1], + ... [1, 1, 1, 1, 1], + ... [1, 1, 0, 1, 1], + ... 
[1, 0, 0, 0, 1]], dtype=bool) + >>> result = ski.morphology.isotropic_opening(image, radius=1) + >>> result.view(cp.uint8) + array([[1, 0, 0, 0, 1], + [1, 1, 0, 1, 1], + [1, 1, 1, 1, 1], + [1, 1, 0, 1, 1], + [1, 0, 0, 0, 1]], dtype=uint8) + """ out = _check_output(out, image.shape) eroded = isotropic_erosion(image, radius, spacing=spacing) @@ -229,10 +291,11 @@ def isotropic_opening(image, radius, out=None, spacing=None): def isotropic_closing(image, radius, out=None, spacing=None): """Return binary morphological closing of an image. - This function returns the same result as binary - :func:`skimage.morphology.binary_closing` but performs faster for large - circular structuring elements. This works by thresholding the exact - Euclidean distance map [1]_, [2]_. The implementation is based on: + Compared to the more general :func:`cucim.skimage.morphology.closing`, this + function only supports binary inputs and circular footprints. + However, it performs typically faster for large (circular) footprints. + This works by thresholding the exact Euclidean distance map [1]_, [2]_. + The implementation is based on: func:`cucim.core.operations.morphology.distance_transform_edt`. Parameters @@ -240,7 +303,7 @@ def isotropic_closing(image, radius, out=None, spacing=None): image : ndarray Binary input image. radius : float - The radius with which the regions should be closed. + The radius of the footprint used for the operation. out : ndarray of bool, optional The array to store the result of the morphology. If None, is passed, a new array will be allocated. @@ -278,6 +341,25 @@ def isotropic_closing(image, radius, out=None, spacing=None): and thresholding of distance maps, Pattern Recognition Letters, Volume 13, Issue 3, 1992, Pages 161-166. :DOI:`10.1016/0167-8655(92)90055-5` + + Examples + -------- + Close gap between two bright regions + + >>> import cupy as cp + >>> import cucim.skimage as ski + >>> image = cp.array([[0, 0, 0, 0, 0], + ... [0, 0, 0, 0, 0], + ... 
[1, 1, 0, 1, 1], + ... [0, 0, 0, 0, 0], + ... [0, 0, 0, 0, 0]], dtype=bool) + >>> result = ski.morphology.isotropic_closing(image, radius=1) + >>> result.view(cp.uint8) + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 0, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], dtype=uint8) """ out = _check_output(out, image.shape) dilated = isotropic_dilation(image, radius, spacing=spacing) diff --git a/python/cucim/src/cucim/skimage/morphology/misc.py b/python/cucim/src/cucim/skimage/morphology/misc.py index f80a6b791..179f28017 100644 --- a/python/cucim/src/cucim/skimage/morphology/misc.py +++ b/python/cucim/src/cucim/skimage/morphology/misc.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause """Miscellaneous morphology functions.""" @@ -10,7 +10,7 @@ import cucim.skimage._vendored.ndimage as ndi -from .._shared.utils import warn +from .._shared.utils import DEPRECATED, deprecate_parameter, warn # Our function names don't exactly correspond to ndimages. # This dictionary translates from our names to scipy's. 
@@ -63,12 +63,26 @@ def _check_dtype_supported(ar): ) -def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): +@deprecate_parameter( + deprecated_name="min_size", + new_name="max_size", + value_adapter=lambda x: x - 1, + # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] + start_version="26.04", + # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] + stop_version="26.12", + template=f"{deprecate_parameter.replace_parameter_template} " + "Note that the new threshold removes objects smaller than **or equal to** " + "its value, while the previous parameter only removed smaller ones.", +) +def remove_small_objects( + ar, min_size=DEPRECATED, connectivity=1, *, max_size=64, out=None +): """Remove objects smaller than the specified size. - Expects ar to be an array with labeled objects, and removes objects - smaller than min_size. If `ar` is bool, the image is first labeled. - This leads to potentially different behavior for bool and 0-and-1 + Expects `ar` to be an array with labeled objects, and removes objects + smaller than or equal to `max_size`. If `ar` is bool, the image is first + labeled. This leads to potentially different behavior for bool vs. 0-and-1 arrays. Parameters @@ -76,8 +90,9 @@ def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): ar : ndarray (arbitrary shape, int or bool type) The array containing the objects of interest. If the array type is int, the ints must be non-negative. - min_size : int, optional (default: 64) - The smallest allowable object size. + max_size : int, optional (default: 64) + Remove objects whose contiguous area (or volume, in N-D) contains this + number of pixels or fewer. connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1) The connectivity defining the neighborhood of a pixel. Used during labelling if `ar` is bool. 
@@ -97,6 +112,10 @@ def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): out : ndarray, same shape and type as input `ar` The input array with small connected components removed. + See Also + -------- + cucim.skimage.morphology.remove_small_holes + Examples -------- >>> import cupy as cp @@ -104,17 +123,17 @@ def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): >>> a = cp.array([[0, 0, 0, 1, 0], ... [1, 1, 1, 0, 0], ... [1, 1, 1, 0, 1]], bool) - >>> b = morphology.remove_small_objects(a, 6) + >>> b = morphology.remove_small_objects(a, max_size=5) >>> b array([[False, False, False, False, False], [ True, True, True, False, False], [ True, True, True, False, False]]) - >>> c = morphology.remove_small_objects(a, 7, connectivity=2) + >>> c = morphology.remove_small_objects(a, max_size=6, connectivity=2) >>> c array([[False, False, False, True, False], [ True, True, True, False, False], [ True, True, True, False, False]]) - >>> d = morphology.remove_small_objects(a, 6, out=a) + >>> d = morphology.remove_small_objects(a, max_size=5, out=a) >>> d is a True @@ -127,7 +146,7 @@ def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): else: out[:] = ar - if min_size == 0: # shortcut for efficiency + if max_size == 0: # shortcut for efficiency return out if out.dtype == bool: @@ -152,23 +171,38 @@ def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None): "Did you mean to use a boolean array?" 
) - too_small = component_sizes < min_size + too_small = component_sizes <= max_size + too_small_mask = too_small[ccs] out[too_small_mask] = 0 return out -def remove_small_holes(ar, area_threshold=64, connectivity=1, *, out=None): +@deprecate_parameter( + deprecated_name="area_threshold", + new_name="max_size", + value_adapter=lambda x: x - 1, + # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] + start_version="26.04", + # rapids-pre-commit-hooks: disable-next-line[verify-hardcoded-version] + stop_version="26.12", + template=f"{deprecate_parameter.replace_parameter_template} " + "Note that the new threshold removes objects smaller than **or equal to** " + "its value, while the previous parameter only removed smaller ones.", +) +def remove_small_holes( + ar, area_threshold=DEPRECATED, connectivity=1, *, max_size=64, out=None +): """Remove contiguous holes smaller than the specified size. Parameters ---------- ar : ndarray (arbitrary shape, int or bool type) The array containing the connected components of interest. - area_threshold : int, optional (default: 64) - The maximum area, in pixels, of a contiguous hole that will be filled. - Replaces `min_size`. + max_size : int, optional (default: 64) + Remove holes whose contiguous area (or volume, in N-D) contains this + number of pixels or fewer. connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1) The connectivity defining the neighborhood of a pixel. out : ndarray @@ -187,6 +221,10 @@ def remove_small_holes(ar, area_threshold=64, connectivity=1, *, out=None): out : ndarray, same shape and type as input `ar` The input array with small holes within connected components removed. + See Also + -------- + cucim.skimage.morphology.remove_small_objects + Examples -------- >>> import cupy as cp @@ -195,19 +233,19 @@ def remove_small_holes(ar, area_threshold=64, connectivity=1, *, out=None): ... [1, 1, 1, 0, 1, 0], ... [1, 0, 0, 1, 1, 0], ... 
[1, 1, 1, 1, 1, 0]], bool) - >>> b = morphology.remove_small_holes(a, 2) + >>> b = morphology.remove_small_holes(a, max_size=1) >>> b array([[ True, True, True, True, True, False], [ True, True, True, True, True, False], [ True, False, False, True, True, False], [ True, True, True, True, True, False]]) - >>> c = morphology.remove_small_holes(a, 2, connectivity=2) + >>> c = morphology.remove_small_holes(a, max_size=1, connectivity=2) >>> c array([[ True, True, True, True, True, False], [ True, True, True, False, True, False], [ True, False, False, True, True, False], [ True, True, True, True, True, False]]) - >>> d = morphology.remove_small_holes(a, 2, out=a) + >>> d = morphology.remove_small_holes(a, max_size=1, out=a) >>> d is a True @@ -239,7 +277,12 @@ def remove_small_holes(ar, area_threshold=64, connectivity=1, *, out=None): cp.logical_not(ar, out=out) # removing small objects from the inverse of ar - out = remove_small_objects(out, area_threshold, connectivity, out=out) + out = remove_small_objects( + out, + max_size=max_size, + connectivity=connectivity, + out=out, + ) cp.logical_not(out, out=out) diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py b/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py index e3b527947..a0c2ca2f9 100755 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -10,6 +10,8 @@ from skimage import data from cucim.skimage import color, morphology + +# from cucim.skimage._shared.testing import assert_stacklevel from cucim.skimage.morphology import footprint_rectangle from cucim.skimage.util import img_as_bool @@ -17,6 +19,17 @@ bw_img = img > 100 / 255.0 +# So far we have not yet deprecated binary_* morphology functions, so no need +# yet for this warning skip. +# +# pytestmark = pytest.mark.filterwarnings( +# "ignore:" +# "`binary_(dilation|erosion|opening|closing)` is deprecated.*" +# "Use `skimage.morphology.(dilation|erosion|opening|closing)` instead" +# ":FutureWarning" +# ) + + def test_non_square_image(): footprint = footprint_rectangle((3, 3)) binary_res = morphology.binary_erosion(bw_img[:100, :200], footprint) @@ -117,7 +130,7 @@ def test_rectangle_decomposition(function, nrows, ncols, decomposition): @pytest.mark.parametrize("n", (0, 1, 2, 3, 4, 5)) @pytest.mark.parametrize("decomposition", ["sequence"]) @pytest.mark.filterwarnings( - "ignore:.*falling back to decomposition='separable':UserWarning:skimage" + "ignore:.*falling back to decomposition='separable':UserWarning" ) def test_octagon_decomposition(function, m, n, decomposition): """Validate footprint decomposition for various shapes. @@ -188,7 +201,7 @@ def test_cube_decomposition(function, shape, decomposition): @pytest.mark.parametrize("radius", (1, 2, 3)) @pytest.mark.parametrize("decomposition", ["sequence"]) @pytest.mark.filterwarnings( - "ignore:.*falling back to decomposition='separable':UserWarning:skimage" + "ignore:.*falling back to decomposition='separable':UserWarning" ) def test_octahedron_decomposition(function, radius, decomposition): """Validate footprint decomposition for various shapes. 
@@ -378,3 +391,20 @@ def test_tuple_as_footprint(function, ndim): expected = func(img, footprint=footprint_ndarray) out = func(img, footprint=footprint_shape) testing.assert_array_equal(expected, out) + + +# So far we have not yet deprecated binary_* morphology functions + +# @pytest.mark.parametrize( +# "func_name", +# ["binary_erosion", "binary_dilation", "binary_opening", "binary_closing"], +# ) +# def test_deprecation_warning(func_name): +# func = getattr(binary, func_name) +# footprint = footprint_rectangle((3, 3)) + +# regex = f"`{func_name}` is deprecated" +# with pytest.warns(FutureWarning, match=regex) as record: +# func(bw_img, footprint) +# assert_stacklevel(record) +# assert len(record) == 1 diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py b/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py index 9e5a92e03..4f1fcc10f 100755 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -13,7 +13,6 @@ from cucim.skimage import color, morphology, transform from cucim.skimage._shared._warnings import expected_warnings -from cucim.skimage._shared.testing import assert_stacklevel from cucim.skimage.morphology import footprint_rectangle, gray from cucim.skimage.util import img_as_ubyte, img_as_uint @@ -448,7 +447,7 @@ def test_diamond_decomposition(cam_image, function, radius, decomposition): @pytest.mark.parametrize("n", (0, 1, 2, 3)) @pytest.mark.parametrize("decomposition", ["sequence"]) @pytest.mark.filterwarnings( - "ignore:.*falling back to decomposition='separable':UserWarning:skimage" + "ignore:.*falling back to decomposition='separable':UserWarning" ) def test_octagon_decomposition(cam_image, function, m, n, decomposition): """Validate footprint decomposition for various shapes. @@ -544,16 +543,3 @@ def test_tuple_as_footprint(function, ndim, odd_only): expected = func(img, footprint=footprint_ndarray) out = func(img, footprint=footprint_shape) testing.assert_array_equal(expected, out) - - -@pytest.mark.parametrize("func", [morphology.erosion, morphology.dilation]) -@pytest.mark.parametrize("name", ["shift_x", "shift_y"]) -@pytest.mark.parametrize("value", [True, False, None]) -def test_deprecated_shift(func, name, value): - img = cp.ones(10) - func(img) # Shouldn't warn - - regex = "`shift_x` and `shift_y` are deprecated" - with pytest.warns(FutureWarning, match=regex) as record: - func(img, **{name: value}) - assert_stacklevel(record) diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py b/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py index fe59ae0bb..abe3d7fbb 100644 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 
2022-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -18,16 +18,14 @@ def test_non_square_image(): isotropic_res = morphology.isotropic_erosion(bw_img[:100, :200], 3) binary_res = img_as_bool( - morphology.binary_erosion(bw_img[:100, :200], morphology.disk(3)) + morphology.erosion(bw_img[:100, :200], morphology.disk(3)) ) assert_array_equal(isotropic_res, binary_res) def test_isotropic_erosion(): isotropic_res = morphology.isotropic_erosion(bw_img, 3) - binary_res = img_as_bool( - morphology.binary_erosion(bw_img, morphology.disk(3)) - ) + binary_res = img_as_bool(morphology.erosion(bw_img, morphology.disk(3))) assert_array_equal(isotropic_res, binary_res) @@ -52,34 +50,26 @@ def _disk_with_spacing( def test_isotropic_erosion_spacing(): isotropic_res = morphology.isotropic_dilation(bw_img, 6, spacing=(1, 2)) binary_res = img_as_bool( - morphology.binary_dilation( - bw_img, _disk_with_spacing(6, spacing=(1, 2)) - ) + morphology.dilation(bw_img, _disk_with_spacing(6, spacing=(1, 2))) ) assert_array_equal(isotropic_res, binary_res) def test_isotropic_dilation(): isotropic_res = morphology.isotropic_dilation(bw_img, 3) - binary_res = img_as_bool( - morphology.binary_dilation(bw_img, morphology.disk(3)) - ) + binary_res = img_as_bool(morphology.dilation(bw_img, morphology.disk(3))) assert_array_equal(isotropic_res, binary_res) def test_isotropic_closing(): isotropic_res = morphology.isotropic_closing(bw_img, 3) - binary_res = img_as_bool( - morphology.binary_closing(bw_img, morphology.disk(3)) - ) + binary_res = img_as_bool(morphology.closing(bw_img, morphology.disk(3))) assert_array_equal(isotropic_res, binary_res) def test_isotropic_opening(): isotropic_res = morphology.isotropic_opening(bw_img, 3) - binary_res = img_as_bool( - morphology.binary_opening(bw_img, morphology.disk(3)) - ) + binary_res = 
img_as_bool(morphology.opening(bw_img, morphology.disk(3))) assert_array_equal(isotropic_res, binary_res) @@ -87,7 +77,7 @@ def test_footprint_overflow(): img = cp.zeros((20, 20), dtype=bool) img[2:19, 2:19] = True isotropic_res = morphology.isotropic_erosion(img, 9) - binary_res = img_as_bool(morphology.binary_erosion(img, morphology.disk(9))) + binary_res = img_as_bool(morphology.erosion(img, morphology.disk(9))) assert_array_equal(isotropic_res, binary_res) diff --git a/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py b/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py index 3d18ea434..0427ca41c 100755 --- a/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py +++ b/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -8,38 +8,53 @@ from numpy.testing import assert_equal from cucim.skimage._shared._warnings import expected_warnings +from cucim.skimage._shared.testing import assert_stacklevel from cucim.skimage.morphology import remove_small_holes, remove_small_objects # fmt: off -test_image = cp.array([[0, 0, 0, 1, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 0, 1]], bool) +test_object_image = cp.array([[0, 0, 0, 1, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 1]], bool) # fmt: on def test_one_connectivity(): + # With connectivity=1, the biggest object has a size of 6 pixels, so + # `max_size=6` should remove everything + observed = remove_small_objects(test_object_image, max_size=6) + cp.testing.assert_array_equal(observed, cp.zeros_like(observed)) + # fmt: off expected = cp.array([[0, 0, 0, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 0, 0]], bool) # fmt: on - observed = remove_small_objects(test_object_image, min_size=6) + observed = remove_small_objects(test_object_image, max_size=5) assert_array_equal(observed, expected) def test_two_connectivity(): + # With connectivity=2, the biggest object has a size of 7 pixels, so + # `max_size=7` should remove everything + observed = remove_small_objects( + test_object_image, max_size=7, connectivity=2 + ) + cp.testing.assert_array_equal(observed, cp.zeros_like(observed)) + # fmt: off expected = cp.array([[0, 0, 0, 1, 0], [1, 1, 1, 0, 0], [1, 1, 1, 0, 0]], bool) # fmt: on - observed = remove_small_objects(test_object_image, min_size=7, connectivity=2) + observed = remove_small_objects( + test_object_image, max_size=6, connectivity=2 + ) assert_array_equal(observed, expected) def test_in_place(): - image = test_object_image.copy() - observed = remove_small_objects(image, min_size=6, out=image) + image = test_object_image.copy() + observed = remove_small_objects(image, max_size=5, out=image) assert_equal( observed is image, True, @@ -50,8 +65,8 @@ def test_in_place(): @pytest.mark.parametrize("in_dtype", 
[bool, int, cp.int32]) @pytest.mark.parametrize("out_dtype", [bool, int, cp.int32]) def test_out(in_dtype, out_dtype): - image = test_image.astype(in_dtype, copy=True) - expected_out = cp.empty_like(test_image, dtype=out_dtype) + image = test_object_image.astype(in_dtype, copy=True) + expected_out = cp.empty_like(test_object_image, dtype=out_dtype) if out_dtype != bool: # object with only 1 label will warn on non-bool output dtype @@ -60,7 +75,7 @@ def test_out(in_dtype, out_dtype): exp_warn = [] with expected_warnings(exp_warn): - out = remove_small_objects(image, min_size=6, out=expected_out) + out = remove_small_objects(image, max_size=5, out=expected_out) assert out is expected_out @@ -76,7 +91,7 @@ def test_labeled_image(): [2, 0, 0, 0, 0], [0, 0, 3, 3, 3]], dtype=int) # fmt: on - observed = remove_small_objects(labeled_image, min_size=3) + observed = remove_small_objects(labeled_image, max_size=2) assert_array_equal(observed, expected) @@ -91,7 +106,7 @@ def test_uint_image(): [2, 0, 0, 0, 0], [0, 0, 3, 3, 3]], dtype=cp.uint8) # fmt: on - observed = remove_small_objects(labeled_image, min_size=3) + observed = remove_small_objects(labeled_image, max_size=2) assert_array_equal(observed, expected) @@ -102,7 +117,7 @@ def test_single_label_warning(): [1, 1, 1, 0, 0]], int) # fmt: on with expected_warnings(["use a boolean array?"]): - remove_small_objects(image, min_size=6) + remove_small_objects(image, max_size=5) def test_float_input(): @@ -117,6 +132,58 @@ def test_negative_input(): remove_small_objects(negative_int) +def test_remove_small_objects_deprecated_min_size(): + # Old min_size used exclusive threshold (< N), new max_size uses + # inclusive threshold (<= N). Verify backward compatibility: min_size=N + # should match max_size=N-1. 
+ # fmt: off + expected = cp.array([[0, 0, 0, 0, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 0]], bool) + # fmt: on + + observed = remove_small_objects(test_object_image, max_size=5) + assert_array_equal(observed, expected) + + regex = "Parameter `min_size` is deprecated" + with pytest.warns(FutureWarning, match=regex) as record: + observed = remove_small_objects(test_object_image, min_size=6) + assert_stacklevel(record) + assert_array_equal(observed, expected) + + # Misusing signature should raise + with pytest.warns(FutureWarning, match=regex): + with pytest.raises(ValueError, match=".* avoid conflicting values"): + remove_small_objects(test_object_image, min_size=6, max_size=5) + + +def test_remove_small_objects_deprecated_min_size_boundary(): + # Verify the boundary: an object of exactly size N should NOT be removed + # by min_size=N (old exclusive: < N) but SHOULD be removed by + # max_size=N (new inclusive: <= N). + # fmt: off + image = cp.array([[0, 0, 0, 1, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 1]], bool) + # The 6-pixel object plus 1-pixel objects + # fmt: on + + # max_size=6 removes ALL objects (sizes 6, 1, 1 are all <= 6) + observed = remove_small_objects(image, max_size=6) + assert not observed.any() + + # min_size=6 used exclusive threshold (< 6), so the 6-pixel object + # is kept — it should match max_size=5 + with pytest.warns(FutureWarning): + observed = remove_small_objects(image, min_size=6) + # fmt: off + expected = cp.array([[0, 0, 0, 0, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 0]], bool) + # fmt: on + assert_array_equal(observed, expected) + + # fmt: off test_holes_image = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], @@ -140,7 +207,7 @@ def test_one_connectivity_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool) # fmt: on - observed = remove_small_holes(test_holes_image, area_threshold=3) + observed = remove_small_holes(test_holes_image, max_size=2) assert_array_equal(observed, expected) @@ 
-155,15 +222,13 @@ def test_two_connectivity_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool) # fmt: on - observed = remove_small_holes( - test_holes_image, area_threshold=3, connectivity=2 - ) + observed = remove_small_holes(test_holes_image, max_size=2, connectivity=2) assert_array_equal(observed, expected) def test_in_place_holes(): image = test_holes_image.copy() - observed = remove_small_holes(image, area_threshold=3, out=image) + observed = remove_small_holes(image, max_size=2, out=image) assert_equal( observed is image, True, "remove_small_holes in_place argument failed." ) @@ -172,7 +237,7 @@ def test_in_place_holes(): def test_out_remove_small_holes(): image = test_holes_image.copy() expected_out = cp.empty_like(image) - out = remove_small_holes(image, area_threshold=3, out=expected_out) + out = remove_small_holes(image, max_size=2, out=expected_out) assert out is expected_out @@ -181,7 +246,7 @@ def test_non_bool_out(): image = test_holes_image.copy() expected_out = cp.empty_like(image, dtype=int) with pytest.raises(TypeError): - remove_small_holes(image, area_threshold=3, out=expected_out) + remove_small_holes(image, max_size=2, out=expected_out) def test_labeled_image_holes(): @@ -205,7 +270,7 @@ def test_labeled_image_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool) # fmt: on with expected_warnings(["returned as a boolean array"]): - observed = remove_small_holes(labeled_holes_image, area_threshold=3) + observed = remove_small_holes(labeled_holes_image, max_size=2) assert_array_equal(observed, expected) @@ -230,7 +295,7 @@ def test_uint_image_holes(): [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool) # fmt: on with expected_warnings(["returned as a boolean array"]): - observed = remove_small_holes(labeled_holes_image, area_threshold=3) + observed = remove_small_holes(labeled_holes_image, max_size=2) assert_array_equal(observed, expected) @@ -247,11 +312,65 @@ def test_label_warning_holes(): dtype=int) # fmt: on with 
expected_warnings(["use a boolean array?"]): - remove_small_holes(labeled_holes_image, area_threshold=3) - remove_small_holes(labeled_holes_image.astype(bool), area_threshold=3) + remove_small_holes(labeled_holes_image, max_size=2) + remove_small_holes(labeled_holes_image.astype(bool), max_size=2) def test_float_input_holes(): float_test = cp.random.rand(5, 5) with pytest.raises(TypeError): remove_small_holes(float_test) + + +def test_remove_small_holes_deprecated_area_threshold(): + # Old area_threshold=3 used exclusive threshold (< 3), equivalent to + # new max_size=2 which uses inclusive threshold (<= 2). + expected = cp.array( + [ + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + ], + bool, + ) + + observed = remove_small_holes(test_holes_image, max_size=2) + assert_array_equal(observed, expected) + + # area_threshold=3 (exclusive) should match max_size=2 (inclusive) + regex = "Parameter `area_threshold` is deprecated" + with pytest.warns(FutureWarning, match=regex) as record: + observed = remove_small_holes(test_holes_image, area_threshold=3) + assert_stacklevel(record) + cp.testing.assert_array_equal(observed, expected) + + # Misusing signature should raise + with pytest.warns(FutureWarning, match=regex): + with pytest.raises(ValueError, match=".* avoid conflicting values"): + remove_small_holes(test_holes_image, area_threshold=3, max_size=3) + with pytest.warns(FutureWarning, match=regex): + with pytest.raises(ValueError, match=".* avoid conflicting values"): + remove_small_holes(test_holes_image, 3, max_size=3) + + +def test_remove_small_holes_deprecated_threshold_boundary(): + # Verify that the exclusive-to-inclusive conversion is correct at the + # boundary: a hole of exactly size N should NOT be removed by + # area_threshold=N (old 
exclusive: < N) but SHOULD be removed by + # max_size=N (new inclusive: <= N). + image = cp.array([[1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]], bool) + + # max_size=3 removes holes of size <= 3, so the size-3 hole is filled + filled = remove_small_holes(image, max_size=3) + assert filled.all() + + # area_threshold=3 used exclusive threshold (< 3), so the size-3 hole + # is NOT removed — it should match max_size=2 + with pytest.warns(FutureWarning): + kept = remove_small_holes(image, area_threshold=3) + assert_array_equal(kept, image) diff --git a/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py b/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py index aa78a2ebb..7ec83ce76 100644 --- a/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py +++ b/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py @@ -1,7 +1,9 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause +from pathlib import Path + import cupy as cp import numpy as np import pytest @@ -11,7 +13,6 @@ from skimage.io import imread from cucim.skimage._shared.fft import fftmodule as fft -from cucim.skimage._shared.testing import fetch from cucim.skimage._shared.utils import _supported_float_type from cucim.skimage.registration._masked_phase_cross_correlation import ( _masked_phase_cross_correlation as masked_register_translation, @@ -139,17 +140,14 @@ def test_masked_registration_padfield_data(): # Test translated from MATLABimplementation `MaskedFFTRegistrationTest` # file. 
You can find the source code here: # http://www.dirkpadfield.com/Home/MaskedFFTRegistrationCode.zip + test_data_dir = Path(__file__).absolute().parent / "data" shifts = [(75, 75), (-130, 130), (130, 130)] for xi, yi in shifts: - fixed_image = cp.array( - imread(fetch(f"registration/tests/data/OriginalX{xi:d}Y{yi:d}.png")) - ) - moving_image = cp.array( - imread( - fetch(f"registration/tests/data/TransformedX{xi:d}Y{yi:d}.png") - ) - ) + fname = f"OriginalX{xi}Y{yi}.png" + fixed_image = cp.asarray(imread(test_data_dir / fname)) + fname = f"TransformedX{xi}Y{yi}.png" + moving_image = cp.asarray(imread(test_data_dir / fname)) # Valid pixels are 1 fixed_mask = fixed_image != 0 diff --git a/python/cucim/src/cucim/skimage/restoration/deconvolution.py b/python/cucim/src/cucim/skimage/restoration/deconvolution.py index a1344995a..be7b7a75f 100644 --- a/python/cucim/src/cucim/skimage/restoration/deconvolution.py +++ b/python/cucim/src/cucim/skimage/restoration/deconvolution.py @@ -1,8 +1,8 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause -"""Implementations restoration functions""" +"""Implementation of various restoration functions""" import warnings @@ -18,68 +18,72 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True): - r"""Wiener-Hunt deconvolution + r"""Restore image using Wiener–Hunt deconvolution. - Return the deconvolution with a Wiener-Hunt approach (i.e. with - Fourier diagonalisation). + Wiener–Hunt deconvolution is a restoration method which follows a Bayesian + approach [1]_. Parameters ---------- - image : cp.ndarray - Input degraded image (can be n-dimensional). + image : (N1, N2, ..., ND) ndarray + Degraded image. psf : ndarray - Point Spread Function. 
This is assumed to be the impulse - response (input image space) if the data-type is real, or the - transfer function (Fourier space) if the data-type is - complex. There is no constraints on the shape of the impulse - response. The transfer function must be of shape + Point spread function (PSF). Assumed to be the impulse + response (input image space) if the data type is real, or the + transfer function (Fourier or frequency space) if the data type is + complex. There is no constraint on the shape of the impulse + response. The transfer function though must be of shape `(N1, N2, ..., ND)` if `is_real is True`, - `(N1, N2, ..., ND // 2 + 1)` otherwise (see `cp.fft.rfftn`). + `(N1, N2, ..., ND // 2 + 1)` otherwise (see :func:`numpy.fft.rfftn`). balance : float - The regularisation parameter value that tunes the balance - between the data adequacy that improve frequency restoration - and the prior adequacy that reduce frequency restoration (to - avoid noise artifacts). + Regularization parameter. Denoted by :math:`\lambda`: in the Notes + section below, its value lets you balance data adequacy (improving + frequency restoration) with respect to prior adequacy (reducing + frequency restoration and avoiding noise artifacts). A larger value for + this parameter favors the regularization/prior. reg : ndarray, optional - The regularisation operator. The Laplacian by default. It can - be an impulse response or a transfer function, as for the - psf. Shape constraint is the same as for the `psf` parameter. - is_real : boolean, optional - True by default. Specify if ``psf`` and ``reg`` are provided - with hermitian hypothesis, that is only half of the frequency - plane is provided (due to the redundancy of Fourier transform - of real signal). It's apply only if ``psf`` and/or ``reg`` are - provided as transfer function. For the hermitian property see - ``uft`` module or ``cupy.fft.rfftn``. - clip : boolean, optional - True by default. 
If True, pixel values of the result above 1 or - under -1 are thresholded for skimage pipeline compatibility. + Regularization operator. Laplacian by default. It can + be an impulse response or a transfer function, as for the PSF. + Shape constraints are the same as for `psf`. + is_real : bool, optional + True by default. Specify if `psf` and `reg` are provided over just half + the frequency space (thanks to the redundancy of the Fourier transform + for real signals). Applies only if `psf` and/or `reg` are + provided as transfer functions. + See ``uft`` module and :func:`np.fft.rfftn`. + clip : bool, optional + True by default. If True, pixel values of the deconvolved image (which + is the return value) above 1 (resp. below -1) are clipped to 1 (resp. + to -1). Be careful to set `clip=False` if you do not want this clipping + and/or if your data range is not [0, 1] or [-1,1]. Returns ------- - im_deconv : (M, N) ndarray + im_deconv : (N1, N2, ..., ND) ndarray The deconvolved image. Examples -------- >>> import cupy as cp - >>> import cupyx.scipy.ndimage as ndi - >>> from cucim.skimage import color, restoration + >>> import cupyx.scipy as sp >>> from skimage import data - >>> img = color.rgb2gray(cp.array(data.astronaut())) + >>> + >>> import cucim.skimage as ski + >>> img = ski.color.rgb2gray(cp.asarray(data.astronaut())) >>> psf = cp.ones((5, 5)) / 25 - >>> img = ndi.uniform_filter(img, size=psf.shape) - >>> img += 0.1 * img.std() * cp.random.standard_normal(img.shape) - >>> deconvolved_img = restoration.wiener(img, psf, 0.1) + >>> img = sp.signal.convolve2d(img, psf, 'same') + >>> rng = cp.random.default_rng() + >>> img += 0.1 * img.std() * rng.standard_normal(img.shape) + >>> deconvolved_img = ski.restoration.wiener(img, psf, 0.1) Notes ----- - This function applies the Wiener filter to a noisy and degraded + This function applies the Wiener filter to a noisy (degraded) image by an impulse response (or PSF). If the data model is .. 
math:: y = Hx + n - where :math:`n` is noise, :math:`H` the PSF and :math:`x` the + where :math:`n` is noise, :math:`H` the PSF, and :math:`x` the unknown original image, the Wiener filter is .. math:: @@ -88,18 +92,19 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True): where :math:`F` and :math:`F^\dagger` are the Fourier and inverse Fourier transforms respectively, :math:`\Lambda_H` the transfer - function (or the Fourier transform of the PSF, see [Hunt] below) - and :math:`\Lambda_D` the filter to penalize the restored image - frequencies (Laplacian by default, that is penalization of high - frequency). The parameter :math:`\lambda` tunes the balance - between the data (that tends to increase high frequency, even - those coming from noise), and the regularization. + function (or the Fourier transform of the PSF, see [2]_), + and :math:`\Lambda_D` the regularization operator, which is a filter + penalizing the restored image frequencies (Laplacian by default, that is, + penalization of high frequencies). The parameter :math:`\lambda` tunes the + balance between data (which tends to increase high frequencies, even those + coming from noise) and regularization/prior (which tends to avoid noise + artifacts). These methods are then specific to a prior model. Consequently, the application or the true image nature must correspond to the - prior model. By default, the prior model (Laplacian) introduce + prior model. By default, the prior model (Laplacian) introduces image smoothness or pixel correlation. It can also be interpreted - as high-frequency penalization to compensate the instability of + as high-frequency penalization to compensate for the instability of the solution with respect to the data (sometimes called noise amplification or "explosive" solution). @@ -110,16 +115,14 @@ def wiener(image, psf, balance, reg=None, is_real=True, clip=True): ---------- .. 
[1] François Orieux, Jean-François Giovannelli, and Thomas Rodet, "Bayesian estimation of regularization and point - spread function parameters for Wiener-Hunt deconvolution", - J. Opt. Soc. Am. A 27, 1593-1607 (2010) - + spread function parameters for Wiener–Hunt deconvolution", + J. Opt. Soc. Am. A 27, 1593–1607 (2010) https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593 - https://hal.archives-ouvertes.fr/hal-00674508 .. [2] B. R. Hunt "A matrix theory proof of the discrete convolution theorem", IEEE Trans. on Audio and - Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971 + Electroacoustics, vol. au-19, no. 4, pp. 285–288, dec. 1971 """ # noqa: E501 if reg is None: reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real) @@ -174,56 +177,52 @@ def unsupervised_wiener( Parameters ---------- image : (M, N) ndarray - The input degraded image. + The input degraded image. psf : ndarray - The impulse response (input image's space) or the transfer - function (Fourier space). Both are accepted. The transfer - function is automatically recognized as being complex - (``cupy.iscomplexobj(psf)``). + The impulse response (input image's space) or the transfer + function (Fourier space). Both are accepted. The transfer + function is automatically recognized as being complex + (``np.iscomplexobj(psf)``). reg : ndarray, optional - The regularisation operator. The Laplacian by default. It can - be an impulse response or a transfer function, as for the psf. + The regularisation operator. The Laplacian by default. It can + be an impulse response or a transfer function, as for the psf. user_params : dict, optional - Dictionary of parameters for the Gibbs sampler. See below. - clip : boolean, optional - True by default. If true, pixel values of the result above 1 or - under -1 are thresholded for skimage pipeline compatibility. - rng : {`cupy.random.Generator`, int}, optional + Dictionary of parameters for the Gibbs sampler. 
Accepted keys are: + + threshold : float + The stopping criterion: the norm of the difference between to + successive approximated solution (empirical mean of object + samples, see Notes section). 1e-4 by default. + burnin : int + The number of sample to ignore to start computation of the + mean. 15 by default. + min_num_iter : int + The minimum number of iterations. 30 by default. + max_num_iter : int + The maximum number of iterations if ``threshold`` is not + satisfied. 200 by default. + callback : callable + A user provided callable to which is passed, if the function + exists, the current image sample for whatever purpose. The user + can store the sample, or compute other moments than the + mean. It has no influence on the algorithm execution and is + only for inspection. + + clip : bool, optional + True by default. If true, pixel values of the result above 1 or + under -1 are thresholded for skimage pipeline compatibility. + rng : {`numpy.random.Generator`, int}, optional Pseudo-random number generator. - By default, a PCG64 generator is used - (see :func:`cupy.random.default_rng`). + By default, a PCG64 generator is used (see :func:`numpy.random.default_rng`). If `rng` is an int, it is used to seed the generator. Returns ------- x_postmean : (M, N) ndarray - The deconvolved image (the posterior mean). + The deconvolved image (the posterior mean). chains : dict - The keys ``noise`` and ``prior`` contain the chain list of - noise and prior precision respectively. - - Other parameters - ---------------- - The keys of ``user_params`` are: - - threshold : float - The stopping criterion: the norm of the difference between to - successive approximated solution (empirical mean of object - samples, see Notes section). 1e-4 by default. - burnin : int - The number of sample to ignore to start computation of the - mean. 15 by default. - min_num_iter : int - The minimum number of iterations. 30 by default. 
- max_num_iter : int - The maximum number of iterations if ``threshold`` is not - satisfied. 200 by default. - callback : callable (None by default) - A user provided callable to which is passed, if the function - exists, the current image sample for whatever purpose. The user - can store the sample, or compute other moments than the - mean. It has no influence on the algorithm execution and is - only for inspection. + The keys ``noise`` and ``prior`` contain the chain list of + noise and prior precision respectively. Examples -------- @@ -413,10 +412,10 @@ def richardson_lucy(image, psf, num_iter=50, clip=True, filter_epsilon=None): num_iter : int, optional Number of iterations. This parameter plays the role of regularisation. - clip : boolean, optional + clip : bool, optional True by default. If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility. - filter_epsilon: float, optional + filter_epsilon : float, optional Value below which intermediate results become 0 to avoid division by small numbers. diff --git a/python/cucim/src/cucim/skimage/restoration/j_invariant.py b/python/cucim/src/cucim/skimage/restoration/j_invariant.py index 3413b75ac..0705fc60a 100644 --- a/python/cucim/src/cucim/skimage/restoration/j_invariant.py +++ b/python/cucim/src/cucim/skimage/restoration/j_invariant.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import functools @@ -112,7 +112,7 @@ def denoise_invariant( masks : list of ndarray, optional Set of masks to use for computing J-invariant output. If `None`, a full set of masks covering the image will be used. - denoiser_kwargs: + denoiser_kwargs : Keyword arguments passed to `denoise_function`. 
Returns diff --git a/python/cucim/src/cucim/skimage/restoration/tests/camera_rl.npy b/python/cucim/src/cucim/skimage/restoration/tests/camera_rl.npy new file mode 100644 index 000000000..a800d7195 Binary files /dev/null and b/python/cucim/src/cucim/skimage/restoration/tests/camera_rl.npy differ diff --git a/python/cucim/src/cucim/skimage/restoration/tests/camera_unsup.npy b/python/cucim/src/cucim/skimage/restoration/tests/camera_unsup.npy new file mode 100644 index 000000000..b15a1c5fb Binary files /dev/null and b/python/cucim/src/cucim/skimage/restoration/tests/camera_unsup.npy differ diff --git a/python/cucim/src/cucim/skimage/restoration/tests/camera_unsup2.npy b/python/cucim/src/cucim/skimage/restoration/tests/camera_unsup2.npy new file mode 100644 index 000000000..5cfd86a74 Binary files /dev/null and b/python/cucim/src/cucim/skimage/restoration/tests/camera_unsup2.npy differ diff --git a/python/cucim/src/cucim/skimage/restoration/tests/camera_wiener.npy b/python/cucim/src/cucim/skimage/restoration/tests/camera_wiener.npy new file mode 100644 index 000000000..d81df74db Binary files /dev/null and b/python/cucim/src/cucim/skimage/restoration/tests/camera_wiener.npy differ diff --git a/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py b/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py index 3cc860a1d..9e95a86f1 100644 --- a/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py +++ b/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py @@ -1,7 +1,9 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause +from pathlib import Path + import cupy as cp import numpy as np import pytest @@ -30,6 +32,7 @@ def astronaut(): test_img = camera() +test_data_dir = Path(__file__).absolute().parent def _get_rtol_atol(dtype): @@ -57,8 +60,8 @@ def test_wiener(dtype): assert deconvolved.dtype == _supported_float_type(dtype) rtol, atol = _get_rtol_atol(dtype) - path = fetch("restoration/tests/camera_wiener.npy") - cp.testing.assert_allclose(deconvolved, np.load(path), rtol=rtol, atol=atol) + reference = np.load(test_data_dir / "camera_wiener.npy") + cp.testing.assert_allclose(deconvolved, reference, rtol=rtol, atol=atol) _, laplacian = uft.laplacian(2, data.shape) otf = uft.ir2tf(psf, data.shape, is_real=False) @@ -68,7 +71,7 @@ def test_wiener(dtype): ) assert deconvolved.real.dtype == _supported_float_type(dtype) cp.testing.assert_allclose( - cp.real(deconvolved), np.load(path), rtol=rtol, atol=atol + cp.real(deconvolved), reference, rtol=rtol, atol=atol ) @@ -158,8 +161,8 @@ def test_richardson_lucy(): psf = cp.asarray(psf) deconvolved = restoration.richardson_lucy(data, psf, 5) - path = fetch("restoration/tests/camera_rl.npy") - cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-4) + reference = np.load(test_data_dir / "camera_rl.npy") + cp.testing.assert_allclose(deconvolved, reference, rtol=1e-4) @pytest.mark.parametrize("dtype_image", [cp.float16, cp.float32, cp.float64]) @@ -179,6 +182,8 @@ def test_richardson_lucy_filtered(dtype_image, dtype_psf): ) deconvolved = restoration.richardson_lucy(data, psf, 5, filter_epsilon=1e-6) assert deconvolved.dtype == _supported_float_type(data.dtype) - - path = fetch("restoration/tests/astronaut_rl.npy") + try: + path = fetch("restoration/astronaut_rl.npy") + except KeyError: + path = fetch("restoration/tests/astronaut_rl.npy") cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3, atol=atol) diff --git a/python/cucim/src/cucim/skimage/restoration/uft.py 
b/python/cucim/src/cucim/skimage/restoration/uft.py index f0d3dacd7..6c2a1fd19 100644 --- a/python/cucim/src/cucim/skimage/restoration/uft.py +++ b/python/cucim/src/cucim/skimage/restoration/uft.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause r"""Function of unitary fourier transform (uft) and utilities @@ -378,7 +378,7 @@ def ir2tf(imp_resp, shape, dim=None, is_real=True): dim : int, optional The last axis along which to compute the transform. All axes by default. - is_real : boolean, optional + is_real : bool, optional If True (default), imp_resp is supposed real and the Hermitian property is used with rfftn Fourier transform. @@ -437,7 +437,7 @@ def laplacian(ndim, shape, is_real=True, *, dtype=None): The dimension of the Laplacian. shape : tuple The support on which to compute the transfer function. - is_real : boolean, optional + is_real : bool, optional If True (default), imp_resp is assumed to be real-valued and the Hermitian property is used with rfftn Fourier transform to return the transfer function. diff --git a/python/cucim/src/cucim/skimage/segmentation/__init__.pyi b/python/cucim/src/cucim/skimage/segmentation/__init__.pyi new file mode 100644 index 000000000..09d6210a8 --- /dev/null +++ b/python/cucim/src/cucim/skimage/segmentation/__init__.pyi @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: 2009-2022 the scikit-image team +# SPDX-FileCopyrightText: Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause + +# Algorithms to partition images into meaningful regions or boundaries + +import lazy_loader as lazy + +__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__) diff --git a/python/cucim/src/cucim/skimage/transform/_geometric.py b/python/cucim/src/cucim/skimage/transform/_geometric.py index a709049a3..68e2bfffc 100644 --- a/python/cucim/src/cucim/skimage/transform/_geometric.py +++ b/python/cucim/src/cucim/skimage/transform/_geometric.py @@ -1,18 +1,24 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause -# TODO: not yet converted for GPU use - import math import textwrap +import warnings import cupy as cp import numpy as np from scipy import spatial from .._shared.compat import NP_COPY_IF_NEEDED -from .._shared.utils import get_bound_method_class, safe_as_int +from .._shared.utils import ( + FailedEstimation, + _deprecate_estimate, + _deprecate_inherited_estimate, + _update_from_estimate_docstring, + get_bound_method_class, + safe_as_int, +) _sin, _cos = math.sin, math.cos @@ -32,34 +38,22 @@ def _affine_matrix_from_vector(v): return matrix -def _center_and_normalize_points(points): - """Center and normalize image points. - - The points are transformed in a two-step procedure that is expressed - as a transformation matrix. The matrix of the resulting points is usually - better conditioned than the matrix of the original points. - - Center the image points, such that the new coordinate system has its - origin at the centroid of the image points. +def _calc_center_normalize(points, scaling="rms"): + """Calculate transformation matrix to center and normalize points. 
- Normalize the image points, such that the mean distance from the points - to the origin of the coordinate system is sqrt(D). - - If the points are all identical, the returned values will contain nan. + Points are centered at their centroid, then scaled according to `scaling`. Parameters ---------- points : (N, D) ndarray The coordinates of the image points. + scaling : {'rms', 'mrs', 'raw'}, optional + Scaling mode. 'raw' performs no centering/scaling. Returns ------- matrix : (D+1, D+1) ndarray - The transformation matrix to obtain the new points. - new_points : (N, D) ndarray - The transformed image points. - has_nan : bool - Indicates if all points were identical causing rms=0. + The homogeneous transformation matrix. References ---------- @@ -68,14 +62,21 @@ def _center_and_normalize_points(points): (1997): 580-593. """ - # TODO: grlee77: exclude numpy arrays? xp = cp.get_array_module(points) n, d = points.shape + scaling = scaling.lower() - centroid = xp.mean(points, axis=0) + if scaling == "raw": + return xp.eye(d + 1) + centroid = xp.mean(points, axis=0) diff = points - centroid - rms = math.sqrt(xp.sum(diff * diff) / points.shape[0]) + if scaling == "rms": + divisor = xp.sqrt(xp.mean(diff * diff)) + elif scaling == "mrs": + divisor = xp.mean(xp.sqrt(xp.sum(diff * diff, axis=1))) / math.sqrt(d) + else: + raise ValueError(f'Unexpected "scaling" of "{scaling}"') # if all the points are the same, the transformation matrix cannot be # created. We return an equivalent matrix with np.nans as sentinel values. @@ -83,31 +84,41 @@ def _center_and_normalize_points(points): # one, and those are only needed when actual 0 is reached, rather than some # small value; ie, we don't need to worry about numerical stability here, # only actual 0. 
- if rms == 0: - return ( - xp.full((d + 1, d + 1), xp.nan), - xp.full_like(points, xp.nan), - True, - ) + if divisor == 0: + return xp.full((d + 1, d + 1), xp.nan) + matrix = xp.eye(d + 1) + matrix[:d, d] = -centroid + matrix[:d, :] /= divisor + return matrix - norm_factor = math.sqrt(d) / rms - matrix = xp.concatenate( - ( - norm_factor - * xp.concatenate((xp.eye(d), -centroid[:, xp.newaxis]), axis=1), - xp.asarray([[0] * d + [1]]), - ), - axis=0, - ) +def _center_and_normalize_points(points, scaling="rms"): + """Convenience function to compute and apply center/normalize.""" + xp = cp.get_array_module(points) + matrix = _calc_center_normalize(points, scaling=scaling) + if not xp.all(xp.isfinite(matrix)): + return matrix + xp.nan, xp.full_like(points, xp.nan) + return matrix, _apply_homogeneous(matrix, points) - points_h = xp.concatenate([points.T, xp.ones((1, n))], axis=0) - new_points_h = (matrix @ points_h).T - new_points = new_points_h[:, :d] - new_points /= new_points_h[:, d:] +def _append_homogeneous_dim(points): + xp = cp.get_array_module(points) + points = xp.asarray(points) + return xp.concatenate( + (points, xp.ones((len(points), 1), dtype=points.dtype)), axis=1 + ) + - return matrix, new_points, False +def _apply_homogeneous(matrix, points): + xp = cp.get_array_module(points) + copy_arr = NP_COPY_IF_NEEDED if xp is np else False + points = xp.array(points, copy=copy_arr, ndmin=2) + points_h = _append_homogeneous_dim(points) + new_points_h = points_h @ matrix.T + divs = new_points_h[:, -1] + eps = np.finfo(float).eps + divs = xp.where(divs == 0, eps, divs) + return new_points_h[:, :-1] / divs[:, None] def _umeyama(src, dst, estimate_scale): @@ -161,8 +172,9 @@ def _umeyama(src, dst, estimate_scale): U, S, V = xp.linalg.svd(A) - # Eq. (40) and (43). - rank = xp.linalg.matrix_rank(A) + # Eq. (40) and (43). Rank calculation from SVD. 
+ tol = S.max() * max(A.shape) * np.finfo(float).eps + rank = xp.count_nonzero(S > tol) if rank == 0: return xp.nan * T elif rank == dim - 1: @@ -249,6 +261,33 @@ def __add__(self, other): """Combine this transformation with another.""" raise NotImplementedError() + @classmethod + def identity(cls, dimensionality=None, xp=cp): + d = 2 if dimensionality is None else dimensionality + return cls(dimensionality=d, xp=xp) + + @classmethod + def from_estimate(cls, src, dst, *args, **kwargs): + """Estimate transform from matching source/destination coordinates.""" + return _from_estimate(cls, src, dst, *args, **kwargs) + + +def _from_estimate(cls, src, dst, *args, **kwargs): + xp = cp.get_array_module(src) + tf = cls.identity(src.shape[1], xp=xp) + estimate_func = getattr(tf, "_estimate", None) + if estimate_func is None: + result = tf.estimate(src, dst, *args, **kwargs) + msg = None if result else "Estimation failed" + else: + msg = estimate_func(src, dst, *args, **kwargs) + # Backward compatibility if an _estimate still returns bool. + if msg is True: + msg = None + elif msg is False: + msg = "Estimation failed" + return tf if msg is None else FailedEstimation(f"{cls.__name__}: {msg}") + class FundamentalMatrixTransform(GeometricTransform): """Fundamental matrix transformation. @@ -333,6 +372,8 @@ class FundamentalMatrixTransform(GeometricTransform): # CuPy Backend: if matrix is None cannot infer array module from it # added explicit xp module argument for now + scaling = "rms" + def __init__(self, matrix=None, *, dimensionality=2, xp=cp): if matrix is None: # default to an identity transform @@ -362,9 +403,7 @@ def __call__(self, coords): Epipolar lines in the destination image. """ - xp = cp.get_array_module(coords) - coords_homogeneous = xp.column_stack([coords, xp.ones(coords.shape[0])]) - return coords_homogeneous @ self.params.T + return _append_homogeneous_dim(coords) @ self.params.T def inverse(self, coords): """Apply inverse transformation. 
@@ -380,9 +419,7 @@ def inverse(self, coords): Epipolar lines in the source image. """ - xp = cp.get_array_module(coords) - coords_homogeneous = xp.column_stack([coords, xp.ones(coords.shape[0])]) - return coords_homogeneous @ self.params + return _append_homogeneous_dim(coords) @ self.params def _setup_constraint_matrix(self, src, dst): """Setup and solve the homogeneous epipolar constraint matrix:: @@ -415,25 +452,17 @@ def _setup_constraint_matrix(self, src, dst): raise ValueError("src.shape[0] must be equal or larger than 8.") xp = cp.get_array_module(src) - # Center and normalize image points for better numerical stability. - try: - src_matrix, src, has_nan1 = _center_and_normalize_points(src) - dst_matrix, dst, has_nan2 = _center_and_normalize_points(dst) - except ZeroDivisionError: - self.params = xp.full((3, 3), xp.nan) - return 3 * [xp.full((3, 3), xp.nan)] - - if has_nan1 or has_nan2: + src_matrix = _calc_center_normalize(src, self.scaling) + dst_matrix = _calc_center_normalize(dst, self.scaling) + if xp.any(xp.isnan(src_matrix + dst_matrix)): self.params = xp.full((3, 3), xp.nan) return 3 * [xp.full((3, 3), xp.nan)] + src_h = _append_homogeneous_dim(_apply_homogeneous(src_matrix, src)) + dst_h = _append_homogeneous_dim(_apply_homogeneous(dst_matrix, dst)) # Setup homogeneous linear equation as dst' * F * src = 0. - A = xp.ones((src.shape[0], 9)) - A[:, :2] = src - A[:, :3] *= dst[:, 0, xp.newaxis] - A[:, 3:5] = src - A[:, 3:6] *= dst[:, 1, xp.newaxis] - A[:, 6:8] = src + cols = [(d_v * s_v) for d_v in dst_h.T for s_v in src_h.T] + A = xp.stack(cols, axis=1) # Solve for the nullspace of the constraint matrix. 
_, _, V = xp.linalg.svd(A) @@ -441,7 +470,12 @@ def _setup_constraint_matrix(self, src, dst): return F_normalized, src_matrix, dst_matrix - def estimate(self, src, dst): + @classmethod + def from_estimate(cls, src, dst): + """Estimate fundamental matrix using the 8-point algorithm.""" + return super().from_estimate(src, dst) + + def _estimate(self, src, dst): """Estimate fundamental matrix using 8-point algorithm. The 8-point algorithm requires at least 8 corresponding point pairs for @@ -461,21 +495,27 @@ def estimate(self, src, dst): True, if model estimation succeeds. """ - xp = cp.get_array_module(src) - F_normalized, src_matrix, dst_matrix = self._setup_constraint_matrix( src, dst ) + xp = cp.get_array_module(F_normalized) + if xp.any(xp.isnan(F_normalized + src_matrix + dst_matrix)): + return "Scaling failed for input points" # Enforcing the internal constraint that two singular values must be - # non-zero and one must be zero. + # non-zero and one must be zero (rank 2). U, S, V = xp.linalg.svd(F_normalized) S[2] = 0 F = U @ xp.diag(S) @ V self.params = dst_matrix.T @ F @ src_matrix - return True + return None + + @_deprecate_estimate + def estimate(self, src, dst): + """Backward-compatible estimate method.""" + return self._estimate(src, dst) is None def residuals(self, src, dst): """Compute the Sampson distance. 
@@ -496,8 +536,8 @@ def residuals(self, src, dst): """ xp = cp.get_array_module(src) - src_homogeneous = xp.column_stack([src, xp.ones(src.shape[0])]) - dst_homogeneous = xp.column_stack([dst, xp.ones(dst.shape[0])]) + src_homogeneous = _append_homogeneous_dim(src) + dst_homogeneous = _append_homogeneous_dim(dst) F_src = self.params @ src_homogeneous.T Ft_dst = self.params.T @ dst_homogeneous.T @@ -578,48 +618,59 @@ class EssentialMatrixTransform(FundamentalMatrixTransform): 0.32453118, 0.00210776, 0.26512283]) """ - # CuPy Backend: if matrix is None cannot infer array module from it - # added explicit xp module argument for now + _rot_det_tol = 1e-6 + _trans_len_tol = 1e-6 + def __init__( self, + *, rotation=None, translation=None, matrix=None, - *, dimensionality=2, xp=cp, ): - super().__init__(matrix=matrix, dimensionality=dimensionality) - if rotation is not None: - if translation is None: - raise ValueError("Both rotation and translation required") - if rotation.shape != (3, 3): - raise ValueError("Invalid shape of rotation matrix") - if abs(xp.linalg.det(rotation) - 1) > 1e-6: - raise ValueError("Rotation matrix must have unit determinant") - if translation.size != 3: - raise ValueError("Invalid shape of translation vector") - if abs(xp.linalg.norm(translation) - 1) > 1e-6: - raise ValueError("Translation vector must have unit length") - # Matrix representation of the cross product for t. - if isinstance(translation, cp.ndarray): - translation = cp.asnumpy(translation) - # CuPy Backend: TODO: always keep t_x, rotation, etc. on host? 
- # fmt: off - t_x = xp.array([0, -translation[2], translation[1], - translation[2], 0, -translation[0], - -translation[1], translation[0], 0]).reshape(3, 3) - # fmt: on - self.params = t_x @ rotation - elif matrix is not None: - if matrix.shape != (3, 3): - raise ValueError("Invalid shape of transformation matrix") - self.params = matrix - else: - # default to an identity transform - self.params = xp.eye(3) + n_rt_none = sum(p is None for p in (rotation, translation)) + if n_rt_none == 1: + raise ValueError( + "Both rotation and translation required when one is specified." + ) + if n_rt_none == 0: + if matrix is not None: + raise ValueError( + "Do not specify rotation or translation when matrix is specified." + ) + matrix = self._rt2matrix(rotation, translation, xp=xp) + super().__init__(matrix=matrix, dimensionality=dimensionality, xp=xp) - def estimate(self, src, dst): + def _rt2matrix(self, rotation, translation, xp=cp): + # Compute small matrix on CPU then transfer to GPU as needed. 
+ if isinstance(rotation, cp.ndarray): + rotation = cp.asnumpy(rotation) + else: + rotation = np.asarray(rotation) + if isinstance(translation, cp.ndarray): + translation = cp.asnumpy(translation) + else: + translation = np.asarray(translation) + if rotation.shape != (3, 3): + raise ValueError("Invalid shape of rotation matrix") + if abs(np.linalg.det(rotation) - 1) > self._rot_det_tol: + raise ValueError("Rotation matrix must have unit determinant") + if translation.size != 3: + raise ValueError("Invalid shape of translation vector") + if abs(np.linalg.norm(translation) - 1) > self._trans_len_tol: + raise ValueError("Translation vector must have unit length") + t0, t1, t2 = [float(translation[i]) for i in range(3)] + t_x = np.array([[0, -t2, t1], [t2, 0, -t0], [-t1, t0, 0]], dtype=float) + return xp.asarray(t_x @ rotation) + + @classmethod + def from_estimate(cls, src, dst): + """Estimate essential matrix using the 8-point algorithm.""" + return super().from_estimate(src, dst) + + def _estimate(self, src, dst): """Estimate essential matrix using 8-point algorithm. The 8-point algorithm requires at least 8 corresponding point pairs for @@ -640,10 +691,12 @@ def estimate(self, src, dst): """ - xp = cp.get_array_module(src) E_normalized, src_matrix, dst_matrix = self._setup_constraint_matrix( src, dst ) + xp = cp.get_array_module(E_normalized) + if xp.any(xp.isnan(E_normalized + src_matrix + dst_matrix)): + return "Scaling failed for input points" # Enforcing the internal constraint that two singular values must be # equal and one must be zero. 
@@ -655,7 +708,12 @@ def estimate(self, src, dst): self.params = dst_matrix.T @ E @ src_matrix - return True + return None + + @_deprecate_estimate + def estimate(self, src, dst): + """Backward-compatible estimate method.""" + return self._estimate(src, dst) is None class ProjectiveTransform(GeometricTransform): @@ -698,6 +756,8 @@ class ProjectiveTransform(GeometricTransform): """ + scaling = "rms" + def __init__(self, matrix=None, *, dimensionality=2, xp=cp): if matrix is None: # default to an identity transform @@ -707,48 +767,22 @@ def __init__(self, matrix=None, *, dimensionality=2, xp=cp): if matrix.shape != (dimensionality + 1, dimensionality + 1): raise ValueError("invalid shape of transformation matrix") self.params = matrix - self._coeffs = range(matrix.size - 1) + + @property + def _coeff_inds(self): + return range(self.params.size - 1) @property def _inv_matrix(self): xp = cp.get_array_module(self.params) return xp.linalg.inv(self.params) - def _apply_mat(self, coords, matrix): - xp = cp.get_array_module(coords) - ndim = matrix.shape[0] - 1 - copy = NP_COPY_IF_NEEDED if xp == np else False - coords = xp.array(coords, copy=copy, ndmin=2) - - src = xp.concatenate([coords, xp.ones((coords.shape[0], 1))], axis=1) - dst = src @ matrix.T - - # below, we will divide by the last dimension of the homogeneous - # coordinate matrix. In order to avoid division by zero, - # we replace exact zeros in this column with a very small number. - if xp is np: - dst[dst[:, ndim] == 0, ndim] = np.finfo(float).eps - else: - # indexing as above not supported by CuPy - tmp = dst[:, ndim] - idx = cp.where(tmp == 0) # synchronize - tmp[idx] = np.finfo(float).eps - dst[:, ndim] = tmp - - # rescale to homogeneous coordinates - dst[:, :ndim] /= dst[:, ndim : ndim + 1] - - return dst[:, :ndim] - def __array__(self, dtype=None, copy=None): """Return the transform parameters as an array. 
Note, __array__ is not currently supported by CuPy """ - if dtype is None: - return self.params - else: - return self.params.astype(dtype) + return self.params if dtype is None else self.params.astype(dtype) def __call__(self, coords): """Apply forward transformation. @@ -764,7 +798,7 @@ def __call__(self, coords): Destination coordinates. """ - return self._apply_mat(coords, self.params) + return _apply_homogeneous(self.params, coords) def inverse(self, coords): """Apply inverse transformation. @@ -780,9 +814,18 @@ def inverse(self, coords): Source coordinates. """ - return self._apply_mat(coords, self._inv_matrix) + return _apply_homogeneous(self._inv_matrix, coords) + + @classmethod + def from_estimate(cls, src, dst, weights=None): + """Estimate projective transform from corresponding points.""" + return super().from_estimate(src, dst, weights) + @_deprecate_estimate def estimate(self, src, dst, weights=None): + return self._estimate(src, dst, weights) is None + + def _estimate(self, src, dst, weights=None): """Estimate the transformation from a set of corresponding points. 
You can determine the over-, well- and under-determined parameters @@ -850,11 +893,15 @@ def estimate(self, src, dst, weights=None): xp = cp.get_array_module(src) n, d = src.shape - src_matrix, src, has_nan1 = _center_and_normalize_points(src) - dst_matrix, dst, has_nan2 = _center_and_normalize_points(dst) - if has_nan1 or has_nan2: + src_matrix, src = _center_and_normalize_points( + src, scaling=self.scaling + ) + dst_matrix, dst = _center_and_normalize_points( + dst, scaling=self.scaling + ) + if not xp.all(xp.isfinite(src_matrix + dst_matrix)): self.params = xp.full((d + 1, d + 1), xp.nan) - return False + return "Scaling generated NaN values" # params: a0, a1, a2, b0, b1, b2, c0, c1 A = xp.zeros((n * d, (d + 1) ** 2)) for ddim in range(d): @@ -867,7 +914,7 @@ def estimate(self, src, dst, weights=None): A[ddim * n : (ddim + 1) * n, -d - 1 :] *= -dst[:, ddim : (ddim + 1)] # Select relevant columns, depending on params - A = A[:, list(self._coeffs) + [-1]] + A = A[:, list(self._coeff_inds) + [-1]] # Get the vectors that correspond to singular values, also applying # the weighting if provided @@ -883,21 +930,13 @@ def estimate(self, src, dst, weights=None): # to a line rather than a plane. 
if xp.isclose(V[-1, -1], 0): self.params = xp.full((d + 1, d + 1), xp.nan) - return False + return "Right singular vector has 0 final element" - H = np.zeros( - (d + 1, d + 1) - ) # np here because .flat not implemented in CuPy + H = np.zeros((d + 1, d + 1)) # solution is right singular vector that corresponds to smallest # singular value - try: - H.flat[list(self._coeffs.get()) + [-1]] = -V[-1, :-1] / V[-1, -1] - except AttributeError: - try: - V = V.get() - except AttributeError: - pass - H.flat[list(self._coeffs) + [-1]] = -V[-1, :-1] / V[-1, -1] + V_cpu = cp.asnumpy(V) if isinstance(V, cp.ndarray) else V + H.flat[list(self._coeff_inds) + [-1]] = -V_cpu[-1, :-1] / V_cpu[-1, -1] H[d, d] = 1 H = xp.asarray(H) @@ -910,7 +949,7 @@ def estimate(self, src, dst, weights=None): self.params = H - return True + return None def __add__(self, other): """Combine this transformation with another.""" @@ -959,6 +998,8 @@ def dimensionality(self): return self.params.shape[0] - 1 +@_update_from_estimate_docstring +@_deprecate_inherited_estimate class AffineTransform(ProjectiveTransform): """Affine transformation. @@ -1070,76 +1111,55 @@ class AffineTransform(ProjectiveTransform): def __init__( self, matrix=None, + *, scale=None, rotation=None, shear=None, translation=None, - *, - dimensionality=2, + dimensionality=None, xp=cp, ): - params = any( - param is not None for param in (scale, rotation, shear, translation) + n_srst_none = sum( + p is None for p in (scale, rotation, shear, translation) ) - - # these parameters get overwritten if a higher-D matrix is given - self._coeffs = range(dimensionality * (dimensionality + 1)) - - if params and matrix is not None: - raise ValueError( - "You cannot specify the transformation matrix and" - " the implicit parameters at the same time." 
- ) - - if params and dimensionality > 2: - raise ValueError("Parameter input is only supported in 2D.") - elif matrix is not None: - if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]: - raise ValueError("Invalid shape of transformation matrix.") - else: - dimensionality = matrix.shape[0] - 1 - nparam = dimensionality * (dimensionality + 1) - self._coeffs = range(nparam) - self.params = matrix - elif params: # note: 2D only - if scale is None: - scale = (1, 1) - if rotation is None: - rotation = 0 - if shear is None: - shear = 0 - if translation is None: - translation = (0, 0) - - if np.isscalar(scale): - sx = sy = scale - else: - sx, sy = scale - - if np.isscalar(shear): - shear_x, shear_y = (shear, 0) - else: - shear_x, shear_y = shear - - a0 = sx * ( - math.cos(rotation) + math.tan(shear_y) * math.sin(rotation) - ) - a1 = -sy * ( - math.tan(shear_x) * math.cos(rotation) + math.sin(rotation) + if n_srst_none != 4: + if matrix is not None: + raise ValueError( + "Do not specify implicit parameters when matrix is specified." + ) + if dimensionality is not None and dimensionality > 2: + raise ValueError( + "Implicit parameters only valid for 2D transforms." 
+ ) + matrix = self._srst2matrix( + scale, rotation, shear, translation, xp=xp ) - a2 = translation[0] + if dimensionality is None: + dimensionality = 2 if matrix is None else matrix.shape[0] - 1 + super().__init__(matrix=matrix, dimensionality=dimensionality, xp=xp) - b0 = sx * ( - math.sin(rotation) - math.tan(shear_y) * math.cos(rotation) - ) - b1 = -sy * ( - math.tan(shear_x) * math.sin(rotation) - math.cos(rotation) - ) - b2 = translation[1] - self.params = xp.array([[a0, a1, a2], [b0, b1, b2], [0, 0, 1]]) - else: - # default to an identity transform - self.params = xp.eye(dimensionality + 1) + @property + def _coeff_inds(self): + return range(self.dimensionality * (self.dimensionality + 1)) + + def _srst2matrix(self, scale, rotation, shear, translation, xp=cp): + scale = (1, 1) if scale is None else scale + sx, sy = (scale, scale) if np.isscalar(scale) else scale + rotation = 0 if rotation is None else rotation + if not np.isscalar(rotation): + raise ValueError("rotation must be scalar for 2D transforms.") + shear = 0 if shear is None else shear + shear_x, shear_y = (shear, 0) if np.isscalar(shear) else shear + translation = (0, 0) if translation is None else translation + if np.isscalar(translation): + raise ValueError("translation must be length 2.") + a2, b2 = translation + + a0 = sx * (math.cos(rotation) + math.tan(shear_y) * math.sin(rotation)) + a1 = -sy * (math.tan(shear_x) * math.cos(rotation) + math.sin(rotation)) + b0 = sx * (math.sin(rotation) - math.tan(shear_y) * math.cos(rotation)) + b1 = -sy * (math.tan(shear_x) * math.sin(rotation) - math.cos(rotation)) + return xp.array([[a0, a1, a2], [b0, b1, b2], [0, 0, 1]]) @property def scale(self): @@ -1148,10 +1168,9 @@ def scale(self): return xp.sqrt(xp.sum(self.params * self.params, axis=0))[ : self.dimensionality ] - else: - ss = xp.sum(self.params * self.params, axis=0) - ss[1] = ss[1] / (math.tan(self.shear) ** 2 + 1) - return xp.sqrt(ss)[: self.dimensionality] + ss = xp.sum(self.params * 
self.params, axis=0) + ss[1] = ss[1] / (math.tan(self.shear) ** 2 + 1) + return xp.sqrt(ss)[: self.dimensionality] @property def rotation(self): @@ -1199,7 +1218,16 @@ def __init__(self): self.affines = None self.inverse_affines = None + @classmethod + def from_estimate(cls, src, dst): + """Estimate piecewise affine transform from corresponding points.""" + return super().from_estimate(src, dst) + + @_deprecate_estimate def estimate(self, src, dst): + return self._estimate(src, dst) is None + + def _estimate(self, src, dst): """Estimate the transformation from a set of corresponding points. Number of source and destination coordinates must match. @@ -1218,14 +1246,15 @@ def estimate(self, src, dst): """ - ndim = src.shape[1] + _, D = src.shape # forward piecewise affine # triangulate input positions into mesh xp = cp.get_array_module(src) # TODO: update if spatial.Delaunay become available in CuPy self._tesselation = spatial.Delaunay(cp.asnumpy(src)) - ok = True + fail_matrix = xp.full((D + 1, D + 1), xp.nan) + messages = [] # find affine mapping from source positions to destination self.affines = [] @@ -1236,8 +1265,12 @@ def estimate(self, src, dst): # vertices is deprecated and scheduled for removal in SciPy 1.11 tesselation_simplices = self._tesselation.vertices for tri in xp.asarray(tesselation_simplices): - affine = AffineTransform(dimensionality=ndim) - ok &= affine.estimate(src[tri, :], dst[tri, :]) + affine = AffineTransform.from_estimate(src[tri, :], dst[tri, :]) + if not affine: + messages.append( + f"Failure at forward simplex {len(self.affines)}: {affine}" + ) + affine = AffineTransform(fail_matrix.copy()) self.affines.append(affine) # inverse piecewise affine @@ -1252,11 +1285,15 @@ def estimate(self, src, dst): # vertices is deprecated and scheduled for removal in SciPy 1.11 inv_tesselation_simplices = self._inverse_tesselation.vertices for tri in xp.asarray(inv_tesselation_simplices): - affine = AffineTransform(dimensionality=ndim) - ok &= 
affine.estimate(dst[tri, :], src[tri, :]) + affine = AffineTransform.from_estimate(dst[tri, :], src[tri, :]) + if not affine: + messages.append( + f"Failure at inverse simplex {len(self.inverse_affines)}: {affine}" + ) + affine = AffineTransform(fail_matrix.copy()) self.inverse_affines.append(affine) - return ok + return "; ".join(messages) if messages else None def __call__(self, coords): """Apply forward transformation. @@ -1344,6 +1381,10 @@ def inverse(self, coords): return out + @classmethod + def identity(cls, dimensionality=None, xp=cp): + return cls() + def _euler_rotation_matrix(angles, degrees=False): """Produce an Euler rotation matrix from the given intrinsic rotation angles @@ -1420,69 +1461,74 @@ class EuclideanTransform(ProjectiveTransform): .. [1] https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions """ + _estimate_scale = False + def __init__( self, matrix=None, + *, rotation=None, translation=None, - *, - dimensionality=2, + dimensionality=None, xp=cp, ): - params_given = rotation is not None or translation is not None - - if params_given and matrix is not None: - raise ValueError( - "You cannot specify the transformation matrix and" - " the implicit parameters at the same time." 
- ) - elif matrix is not None: - if matrix.shape[0] != matrix.shape[1]: - raise ValueError("Invalid shape of transformation matrix.") - self.params = matrix - elif params_given: - if rotation is None: - dimensionality = len(translation) - if dimensionality == 2: - rotation = 0 - elif dimensionality == 3: - rotation = np.zeros(3) - else: - raise ValueError( - "Parameters cannot be specified for dimension " - f"{dimensionality} transforms" - ) - else: - if not np.isscalar(rotation) and len(rotation) != 3: - raise ValueError( - "Parameters cannot be specified for dimension " - f"{dimensionality} transforms" - ) - if translation is None: - translation = (0,) * dimensionality - - if dimensionality == 2: - # fmt: off - self.params = xp.array([ - [math.cos(rotation), - math.sin(rotation), 0], # NOQA - [math.sin(rotation), math.cos(rotation), 0], # NOQA - [ 0, 0, 1], # NOQA - ]) - # fmt: on - - elif dimensionality == 3: - self.params = xp.eye(dimensionality + 1) - self.params[:dimensionality, :dimensionality] = xp.asarray( - _euler_rotation_matrix(rotation) + n_rt_none = sum(p is None for p in (rotation, translation)) + if n_rt_none != 2: + if matrix is not None: + raise ValueError( + "Do not specify implicit parameters when matrix is specified." ) - self.params[0:dimensionality, dimensionality] = xp.asarray( - translation - ) - else: - # default to an identity transform - self.params = xp.eye(dimensionality + 1) + n_dims, chk_msg = self._rt2ndims_msg(rotation, translation) + if chk_msg is not None: + raise ValueError(chk_msg) + if dimensionality is not None and dimensionality != n_dims: + raise ValueError( + f"Dimensionality {dimensionality} does not match " + f"inferred transform dimensionality {n_dims}." 
+ ) + matrix = self._rt2matrix(rotation, translation, n_dims, xp=xp) + if dimensionality is None: + dimensionality = 2 if matrix is None else matrix.shape[0] - 1 + super().__init__(matrix=matrix, dimensionality=dimensionality, xp=xp) + def _rt2ndims_msg(self, rotation, translation): + if rotation is not None: + n = 1 if np.isscalar(rotation) else len(rotation) + msg = ( + "rotation must be scalar (2D) or length 3 (3D)" + if n not in (1, 3) + else None + ) + return (2 if n == 1 else n), msg + if translation is not None: + return (2 if np.isscalar(translation) else len(translation)), None + return None, None + + def _rt2matrix(self, rotation, translation, n_dims, xp=cp): + if translation is None: + translation = (0,) * n_dims + if rotation is None: + rotation = 0 if n_dims == 2 else np.zeros(3) + matrix = xp.eye(n_dims + 1) + if n_dims == 2: + cos_r, sin_r = math.cos(rotation), math.sin(rotation) + matrix[:2, :2] = xp.array([[cos_r, -sin_r], [sin_r, cos_r]]) + elif n_dims == 3: + matrix[:3, :3] = xp.asarray(_euler_rotation_matrix(rotation)) + matrix[:n_dims, n_dims] = xp.asarray(translation) + return matrix + + @_deprecate_estimate def estimate(self, src, dst): + return self._estimate(src, dst) is None + + @classmethod + def from_estimate(cls, src, dst): + """Estimate Euclidean transform from corresponding points.""" + # Avoid ProjectiveTransform.from_estimate(weights=...) call path. + return _from_estimate(cls, src, dst) + + def _estimate(self, src, dst): """Estimate the transformation from a set of corresponding points. You can determine the over-, well- and under-determined parameters @@ -1504,11 +1550,15 @@ def estimate(self, src, dst): """ - self.params = _umeyama(src, dst, False) + self.params = _umeyama(src, dst, self._estimate_scale) # _umeyama will return nan if the problem is not well-conditioned. 
xp = cp.get_array_module(self.params) - return not xp.any(xp.isnan(self.params)) + return ( + "Poor conditioning for estimation" + if xp.any(xp.isnan(self.params)) + else None + ) @property def rotation(self): @@ -1527,6 +1577,8 @@ def translation(self): return self.params[0 : self.dimensionality, self.dimensionality] +@_update_from_estimate_docstring +@_deprecate_inherited_estimate class SimilarityTransform(EuclideanTransform): """Similarity transformation. @@ -1568,82 +1620,64 @@ class SimilarityTransform(EuclideanTransform): """ + _estimate_scale = True + def __init__( self, matrix=None, + *, scale=None, rotation=None, translation=None, - *, - dimensionality=2, + dimensionality=None, xp=cp, ): - self.params = None - params = any( - param is not None for param in (scale, rotation, translation) - ) - - if params and matrix is not None: - raise ValueError( - "You cannot specify the transformation matrix and" - " the implicit parameters at the same time." - ) - elif matrix is not None: - if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]: - raise ValueError("Invalid shape of transformation matrix.") + n_srt_none = sum(p is None for p in (scale, rotation, translation)) + if n_srt_none != 3: + if matrix is not None: + raise ValueError( + "Do not specify implicit parameters when matrix is specified." 
+ ) + self._check_scale(scale, (rotation, translation), dimensionality) + if scale is not None and not np.isscalar(scale): + n_dims, chk_msg = len(scale), None else: - self.params = matrix - dimensionality = matrix.shape[0] - 1 - if params: - if dimensionality not in (2, 3): - raise ValueError("Parameters only supported for 2D and 3D.") - matrix = xp.eye(dimensionality + 1, dtype=float) - if scale is None: - scale = 1 - if rotation is None: - rotation = 0 if dimensionality == 2 else (0, 0, 0) - if translation is None: - translation = (0,) * dimensionality - if dimensionality == 2: - c, s = _cos(rotation), _sin(rotation) - matrix[:2, :2] = xp.array([[c, -s], [s, c]], dtype=float) - else: # 3D rotation - matrix[:3, :3] = xp.asarray(_euler_rotation_matrix(rotation)) - - matrix[:dimensionality, :dimensionality] *= scale - matrix[:dimensionality, dimensionality] = xp.asarray(translation) - self.params = matrix - elif self.params is None: - # default to an identity transform - self.params = xp.eye(dimensionality + 1) - - def estimate(self, src, dst): - """Estimate the transformation from a set of corresponding points. - - You can determine the over-, well- and under-determined parameters - with the total least-squares method. - - Number of source and destination coordinates must match. - - Parameters - ---------- - src : (N, 2) ndarray - Source coordinates. - dst : (N, 2) ndarray - Destination coordinates. - - Returns - ------- - success : bool - True, if model estimation succeeds. - - """ - - self.params = _umeyama(src, dst, estimate_scale=True) - - # _umeyama will return nan if the problem is not well-conditioned. 
- xp = cp.get_array_module(self.params) - return not xp.any(xp.isnan(self.params)) + n_dims, chk_msg = self._rt2ndims_msg(rotation, translation) + if chk_msg is not None: + raise ValueError(chk_msg) + n_dims = ( + n_dims + if n_dims is not None + else dimensionality + if dimensionality is not None + else 2 + ) + if dimensionality is not None and dimensionality != n_dims: + raise ValueError( + f"Dimensionality {dimensionality} does not match " + f"inferred transform dimensionality {n_dims}." + ) + matrix = self._rt2matrix(rotation, translation, n_dims, xp=xp) + if scale not in (None, 1): + matrix[:n_dims, :n_dims] *= xp.asarray(scale) + if dimensionality is None: + dimensionality = 2 if matrix is None else matrix.shape[0] - 1 + super().__init__(matrix=matrix, dimensionality=dimensionality, xp=xp) + + def _check_scale(self, scale, other_params, dimensionality): + if ( + dimensionality in (None, 2) + or scale is None + or not np.isscalar(scale) + ): + return + if all(p is None for p in other_params): + warnings.warn( + "Passing scalar `scale` with dimensionality > 2 and no other " + "implicit parameters is deprecated.", + FutureWarning, + stacklevel=2, + ) @property def scale(self): @@ -1680,19 +1714,31 @@ class PolynomialTransform(GeometricTransform): """ - def __init__(self, params=None, *, dimensionality=2, xp=cp): - if dimensionality != 2: + def __init__(self, params=None, *, dimensionality=None, xp=cp): + if dimensionality is None: + dimensionality = 2 + elif dimensionality != 2: raise NotImplementedError( "Polynomial transforms are only implemented for 2D." 
) - if params is None: - # default to transformation which preserves original coordinates - params = xp.asarray(np.array([[0, 1, 0], [0, 0, 1]])) - if params.shape[0] != 2: - raise ValueError("invalid shape of transformation parameters") - self.params = params + self.params = ( + xp.asarray([[0, 1, 0], [0, 0, 1]]) + if params is None + else xp.asarray(params) + ) + if self.params.shape == () or self.params.shape[0] != 2: + raise ValueError("Transformation parameters must have shape (2, N)") + @classmethod + def from_estimate(cls, src, dst, order=2, weights=None): + """Estimate polynomial transform from corresponding points.""" + return super().from_estimate(src, dst, order, weights) + + @_deprecate_estimate def estimate(self, src, dst, order=2, weights=None): + return self._estimate(src, dst, order=order, weights=weights) is None + + def _estimate(self, src, dst, order=2, weights=None): """Estimate the transformation from a set of corresponding points. You can determine the over-, well- and under-determined parameters @@ -1786,7 +1832,7 @@ def estimate(self, src, dst, order=2, weights=None): self.params = params.reshape((2, u // 2)) - return True + return None def __call__(self, coords): """Apply forward transformation. @@ -1827,6 +1873,10 @@ def inverse(self, coords): "then apply the forward transformation." 
) + @classmethod + def identity(cls, dimensionality=None, xp=cp): + return cls(params=None, dimensionality=dimensionality, xp=xp) + TRANSFORMS = { "euclidean": EuclideanTransform, @@ -1907,10 +1957,7 @@ def estimate_transform(ttype, src, dst, *args, **kwargs): if ttype not in TRANSFORMS: raise ValueError(f"the transformation type '{ttype}' is notimplemented") - tform = TRANSFORMS[ttype](dimensionality=src.shape[1]) - tform.estimate(src, dst, *args, **kwargs) - - return tform + return TRANSFORMS[ttype].from_estimate(src, dst, *args, **kwargs) def matrix_transform(coords, matrix): diff --git a/python/cucim/src/cucim/skimage/transform/_warps.py b/python/cucim/src/cucim/skimage/transform/_warps.py index 6262fddb8..10545e1d9 100644 --- a/python/cucim/src/cucim/skimage/transform/_warps.py +++ b/python/cucim/src/cucim/skimage/transform/_warps.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import math @@ -37,18 +37,18 @@ def _preprocess_resize_output_shape(image, output_shape): Parameters ---------- - image: ndarray + image : ndarray Image to be resized. - output_shape: tuple or ndarray + output_shape : tuple or ndarray Size of the generated output image `(rows, cols[, ...][, dim])`. If `dim` is not provided, the number of channels is preserved. Returns ------- - image: ndarray + image : ndarray The input image, but with additional singleton dimensions appended in the case where ``len(output_shape) > input.ndim``. - output_shape: tuple + output_shape : tuple The output image converted to tuple. Raises @@ -112,7 +112,7 @@ def resize( Returns ------- resized : ndarray - Resized version of the input. + Resized version of the input. See Notes regarding dtype. 
Other parameters ---------------- @@ -147,6 +147,10 @@ def resize( downsampling factor, where s > 1. For the up-size case, s < 1, no anti-aliasing is performed prior to rescaling. + See Also + -------- + cupyx.scipy.ndimage.zoom + Notes ----- Modes 'reflect' and 'symmetric' are similar, but differ in whether the edge @@ -155,6 +159,18 @@ def resize( symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for reflect it would be [0, 1, 2, 1, 0, 1, 2]. + `resize` uses interpolation. Unless the interpolation method is nearest-neighbor + (``order==0``), the algorithm will generate output values as weighted averages + of input values. Accordingly, the output dtype is ``float64`` with the following + exceptions: + + - When ``order==0``, the output dtype is ``image.dtype``. + - When ``image.dtype`` is ``float16`` or ``float32`` or an 8-bit or 16-bit + integer or unsigned integer type, the output dtype is ``float32``. + + For a similar function that preserves the dtype of the input, consider + `cucim.scipy.ndimage.zoom`. + Examples -------- >>> from skimage import data @@ -1338,14 +1354,14 @@ def _local_mean_weights(old_size, new_size, grid_mode, dtype): Parameters ---------- - old_size: int + old_size : int Old size. - new_size: int + new_size : int New size. grid_mode : bool Whether to use grid data model of pixel/voxel model for average weights computation. - dtype: dtype + dtype : dtype Output array data type. Returns diff --git a/python/cucim/src/cucim/skimage/transform/integral.py b/python/cucim/src/cucim/skimage/transform/integral.py index 51f863d02..65c24cf04 100644 --- a/python/cucim/src/cucim/skimage/transform/integral.py +++ b/python/cucim/src/cucim/skimage/transform/integral.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -20,6 +20,10 @@ def integral_image(image, *, dtype=None): ---------- image : ndarray Input image. + dtype : data-type, optional + Data type (NumPy dtype) to be used for calculation, and for + output array `S`. If None, defaults to the more precise of either + float64 or `image`'s dtype. Returns ------- diff --git a/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py b/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py index f5970954b..603e25ecf 100644 --- a/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py +++ b/python/cucim/src/cucim/skimage/transform/tests/test_geometric.py @@ -1,9 +1,10 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import re import textwrap +from itertools import product import cupy as cp import numpy as np @@ -23,8 +24,12 @@ matrix_transform, ) from cucim.skimage.transform._geometric import ( + TRANSFORMS, GeometricTransform, _affine_matrix_from_vector, + _append_homogeneous_dim, + _apply_homogeneous, + _calc_center_normalize, _center_and_normalize_points, _euler_rotation_matrix, ) @@ -53,6 +58,22 @@ # fmt: on +HMAT_TFORMS = ( + FundamentalMatrixTransform, + ProjectiveTransform, + AffineTransform, + EuclideanTransform, + SimilarityTransform, +) + +HMAT_TFORMS_ND = ( + ProjectiveTransform, + AffineTransform, + EuclideanTransform, + SimilarityTransform, +) + + def test_estimate_transform(): for tform in ( "euclidean", @@ -84,19 +105,26 @@ def test_euclidean_estimation(): assert_array_almost_equal(tform2.params[0, 0], tform2.params[1, 1]) assert_array_almost_equal(tform2.params[0, 1], -tform2.params[1, 0]) - # via estimate method - tform3 = EuclideanTransform() - assert 
tform3.estimate(SRC, DST) + # via from_estimate classmethod + tform3 = EuclideanTransform.from_estimate(SRC, DST) assert_array_almost_equal(tform3.params, tform2.params) -def test_3d_euclidean_estimation(): - src_points = cp.random.rand(1000, 3) +@pytest.mark.parametrize( + "tform_class, has_scale", + ((EuclideanTransform, False), (SimilarityTransform, True)), +) +def test_3d_euclidean_similarity_estimation(tform_class, has_scale): + rng = np.random.default_rng(1234) + src_points = cp.asarray(rng.random((1000, 3))) # Random transformation for testing - angles = cp.random.random((3,)) * 2 * np.pi - np.pi + angles = cp.asarray(rng.random((3,))) * 2 * np.pi - np.pi rotation_matrix = cp.array(_euler_rotation_matrix(angles)) - translation_vector = cp.random.random((3,)) + if has_scale: + scale = rng.integers(1, 20) + rotation_matrix = rotation_matrix * scale + translation_vector = cp.asarray(rng.random((3,))) dst_points = [] for pt in src_points: pt_r = pt.reshape(3, 1) @@ -108,12 +136,12 @@ def test_3d_euclidean_estimation(): dst_points = cp.array(dst_points) # estimating the transformation - tform = EuclideanTransform(dimensionality=3) - assert tform.estimate(src_points, dst_points) - estimated_rotation = tform.rotation - estimated_translation = tform.translation - assert_array_almost_equal(estimated_rotation, rotation_matrix) - assert_array_almost_equal(estimated_translation, translation_vector) + tform = tform_class.from_estimate(src_points, dst_points) + assert tform + assert_array_almost_equal(tform.rotation, rotation_matrix) + assert_array_almost_equal(tform.translation, translation_vector) + if has_scale: + assert_array_almost_equal(tform.scale, scale) def test_euclidean_init(): @@ -157,45 +185,11 @@ def test_similarity_estimation(): assert_array_almost_equal(tform2.params[0, 0], tform2.params[1, 1]) assert_array_almost_equal(tform2.params[0, 1], -tform2.params[1, 0]) - # via estimate method - tform3 = SimilarityTransform() - assert tform3.estimate(SRC, DST) + # 
via from_estimate classmethod + tform3 = SimilarityTransform.from_estimate(SRC, DST) assert_array_almost_equal(tform3.params, tform2.params) -def test_3d_similarity_estimation(): - # Note: some choices of seed will produce poorly conditioned values, - # leading to NaN in the estimate. We fix the seed here to avoid this. - seed = 10 - rng = cp.random.default_rng(seed) - src_points = rng.random(size=(1000, 3)) - - # Random transformation for testing - angles = rng.random(size=(3,)) * 2 * np.pi - np.pi - scale = rng.integers(0, 20, size=(1,))[0] - rotation_matrix = cp.array(_euler_rotation_matrix(angles)) * scale - translation_vector = rng.random((3,)) - dst_points = [] - for pt in src_points: - pt_r = pt.reshape(3, 1) - dst = cp.matmul(rotation_matrix, pt_r) + translation_vector.reshape( - 3, 1 - ) - dst = dst.reshape(3) - dst_points.append(dst) - - dst_points = cp.array(dst_points) - # estimating the transformation - tform = SimilarityTransform(dimensionality=3) - assert tform.estimate(src_points, dst_points) - estimated_rotation = tform.rotation - estimated_translation = tform.translation - estimated_scale = tform.scale - assert_array_almost_equal(estimated_translation, translation_vector) - assert_array_almost_equal(estimated_scale, scale) - assert_array_almost_equal(estimated_rotation, rotation_matrix) - - def test_similarity_init(): # init with implicit parameters scale = 0.1 @@ -263,9 +257,8 @@ def test_affine_estimation(): tform2 = estimate_transform("affine", SRC, DST) assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC) - # via estimate method - tform3 = AffineTransform() - assert tform3.estimate(SRC, DST) + # via from_estimate classmethod + tform3 = AffineTransform.from_estimate(SRC, DST) assert_array_almost_equal(tform3.params, tform2.params) @@ -329,9 +322,53 @@ def test_affine_shear(xp): xp.testing.assert_array_almost_equal(tform.params, expected) +@pytest.mark.parametrize( + "pts, params", + product( + (SRC, DST), + ( + dict( + scale=(4, 5), + 
shear=(1.4, 1.8), + rotation=0.4, + translation=(10, 12), + ), + dict( + scale=(-0.5, 3), + shear=(-0.3, -0.1), + rotation=1.4, + translation=(-4, 3), + ), + ), + ), +) +def test_affine_params(pts, params): + out = AffineTransform(**params)(pts) + docstr_out = _apply_aff_2d(cp.asnumpy(pts), **params) + assert np.allclose(cp.asnumpy(out), docstr_out) + + +def _apply_aff_2d(pts, scale, rotation, shear, translation): + x, y = pts.T + sx, sy = scale + shear_x, shear_y = shear + tx, ty = translation + X = ( + sx * x * (np.cos(rotation) + np.tan(shear_y) * np.sin(rotation)) + - sy * y * (np.tan(shear_x) * np.cos(rotation) + np.sin(rotation)) + + tx + ) + Y = ( + sx * x * (np.sin(rotation) - np.tan(shear_y) * np.cos(rotation)) + - sy * y * (np.tan(shear_x) * np.sin(rotation) - np.cos(rotation)) + + ty + ) + return np.stack((X, Y), axis=1) + + def test_piecewise_affine(): - tform = PiecewiseAffineTransform() - assert tform.estimate(SRC, DST) + tform = PiecewiseAffineTransform.from_estimate(SRC, DST) + assert tform # make sure each single affine transform is exactly estimated assert_array_almost_equal(tform(SRC), DST) assert_array_almost_equal(tform.inverse(DST), SRC) @@ -365,6 +402,52 @@ def test_fundamental_matrix_estimation(xp): assert_array_almost_equal(tform.params, tform_ref, 6) +def _calc_distances(src, dst, F, metric="distance"): + src_h, dst_h = [_append_homogeneous_dim(pts) for pts in (src, dst)] + Fu = F @ src_h.T + uFu = cp.sum(dst_h.T * Fu, axis=0) + if metric == "distance": + return uFu / cp.sqrt(cp.sum(Fu[:-1] ** 2, axis=0)) + if metric == "epip-distances": + Fu_dash = F.T @ dst_h.T + scaler = 1 / cp.sum(Fu[:-1] ** 2, axis=0) + 1 / cp.sum( + Fu_dash[:-1] ** 2, axis=0 + ) + return (uFu**2) * scaler + raise ValueError(f'Invalid metric "{metric}"') + + +def test_fundamental_matrix_epipolar_projection(): + src = cp.array( + [ + [1.839035, 1.924743], + [0.543582, 0.375221], + [0.473240, 0.142522], + [0.964910, 0.598376], + [0.102388, 0.140092], + [15.994343, 
9.622164], + [0.285901, 0.430055], + [0.091150, 0.254594], + ] + ) + dst = cp.array( + [ + [1.002114, 1.129644], + [1.521742, 1.846002], + [1.084332, 0.275134], + [0.293328, 0.588992], + [0.839509, 0.087290], + [1.779735, 1.116857], + [0.878616, 0.602447], + [0.642616, 1.028681], + ] + ) + tform = FundamentalMatrixTransform.from_estimate(src, dst) + assert tform + rms_ds = cp.abs(_calc_distances(src, dst, tform.params)) + assert bool(cp.all(rms_ds < 0.5)) + + @pytest.mark.parametrize("xp", [np, cp]) def test_fundamental_matrix_residuals(xp): essential_matrix_tform = EssentialMatrixTransform( @@ -407,13 +490,23 @@ def test_fundamental_matrix_inverse(xp): @pytest.mark.parametrize("xp", [np, cp]) def test_essential_matrix_init(xp): - tform = EssentialMatrixTransform( - rotation=xp.eye(3), translation=xp.array([0, 0, 1]), xp=xp - ) + r = xp.eye(3) + t = xp.array([0, 0, 1]) + tform = EssentialMatrixTransform(rotation=r, translation=t, xp=xp) assert_array_equal( tform.params, xp.array([0, -1, 0, 1, 0, 0, 0, 0, 0]).reshape(3, 3) ) + with pytest.raises( + ValueError, match="Translation vector must have unit length" + ): + EssentialMatrixTransform( + rotation=r, translation=xp.array([0, 0, 2]), xp=xp + ) + with pytest.raises( + ValueError, match="Rotation matrix must have unit determinant" + ): + EssentialMatrixTransform(rotation=r[::-1], translation=t, xp=xp) @pytest.mark.parametrize("xp", [np, cp]) @@ -486,9 +579,8 @@ def test_projective_estimation(): tform2 = estimate_transform("projective", SRC, DST) assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC) - # via estimate method - tform3 = ProjectiveTransform() - assert tform3.estimate(SRC, DST) + # via from_estimate classmethod + tform3 = ProjectiveTransform.from_estimate(SRC, DST) assert_array_almost_equal(tform3.params, tform2.params) @@ -531,9 +623,8 @@ def test_polynomial_estimation(): tform = estimate_transform("polynomial", SRC, DST, order=10) assert_array_almost_equal(tform(SRC), DST, 6) - # via estimate 
method - tform2 = PolynomialTransform() - assert tform2.estimate(SRC, DST, order=10) + # via from_estimate classmethod + tform2 = PolynomialTransform.from_estimate(SRC, DST, order=10) assert_array_almost_equal(tform2.params, tform.params) @@ -688,25 +779,30 @@ def test_invalid_input(xp): ) -def test_degenerate(xp=cp): +@pytest.mark.parametrize( + "tform_class, msg", + ( + (ProjectiveTransform, "Scaling generated NaN values"), + (AffineTransform, "Scaling generated NaN values"), + (EuclideanTransform, "Poor conditioning for estimation"), + (SimilarityTransform, "Poor conditioning for estimation"), + ), +) +@pytest.mark.parametrize("xp", [np, cp]) +def test_degenerate(xp, tform_class, msg): src = dst = xp.zeros((10, 2)) - tform = SimilarityTransform() - assert not tform.estimate(src, dst) + tf = tform_class.from_estimate(src, dst) + assert not tf + assert str(tf) == f"{tform_class.__name__}: {msg}" + tform = tform_class.identity() + with pytest.warns(FutureWarning, match="`estimate` is deprecated"): + assert not tform.estimate(src, dst) assert xp.all(xp.isnan(tform.params)) - tform = EuclideanTransform() - assert not tform.estimate(src, dst) - assert xp.all(xp.isnan(tform.params)) - - tform = AffineTransform() - assert not tform.estimate(src, dst) - assert xp.all(xp.isnan(tform.params)) - - tform = ProjectiveTransform() - assert not tform.estimate(src, dst) - assert xp.all(xp.isnan(tform.params)) +@pytest.mark.parametrize("xp", [np, cp]) +def test_degenerate_2(xp): # See gh-3926 for discussion details tform = ProjectiveTransform() for i in range(20): @@ -718,13 +814,15 @@ def test_degenerate(xp=cp): src[:, 1] = xp.random.rand() # Prior to gh-3926, under the above circumstances, # a transform could be returned with nan values. 
- assert not tform.estimate(src, dst) or xp.isfinite(tform.params).all() - - src = xp.array([[0, 2, 0], [0, 2, 0], [0, 4, 0]]) - dst = xp.array([[0, 1, 0], [0, 1, 0], [0, 3, 0]]) - tform = AffineTransform() - assert not tform.estimate(src, dst) - assert xp.all(xp.isnan(tform.params)) + tf = ProjectiveTransform.from_estimate(src, dst) + assert not tf + assert str(tf) == ( + "ProjectiveTransform: Right singular vector has 0 final element" + ) + with pytest.warns(FutureWarning, match="`estimate` is deprecated"): + result = tform.estimate(src, dst) + assert not result + assert xp.all(xp.isnan(tform.params)) # The tessellation on the following points produces one degenerate affine # warp within PiecewiseAffineTransform. @@ -755,27 +853,68 @@ def test_degenerate(xp=cp): [0, 142, 206], ] ) + # from_estimate + tform = PiecewiseAffineTransform.from_estimate(src, dst) + # Simplex group index 4 has degenerate affine. + bad_affine_i = 4 + assert not tform + assert str(tform).startswith( + f"PiecewiseAffineTransform: Failure at forward simplex {bad_affine_i}" + ) + # estimate method records steps in affine estimation. tform = PiecewiseAffineTransform() - assert not tform.estimate(src, dst) - assert np.all(np.isnan(tform.affines[4].params)) # degenerate affine + with pytest.warns(FutureWarning, match="`estimate` is deprecated"): + assert not tform.estimate(src, dst) + # Check for degenerate affine. 
+ assert xp.all(xp.isnan(tform.affines[bad_affine_i].params)) for idx, affine in enumerate(tform.affines): - if idx != 4: + if idx != bad_affine_i: assert not xp.all(xp.isnan(affine.params)) for affine in tform.inverse_affines: assert not xp.all(xp.isnan(affine.params)) @pytest.mark.parametrize("xp", [np, cp]) -def test_normalize_degenerate_points(xp): +@pytest.mark.parametrize("scaling", ["rms", "mrs"]) +def test_normalize_degenerate_points(xp, scaling): """Return nan matrix *of appropriate size* when point is repeated.""" pts = xp.array([[73.42834308, 94.2977623]] * 3) - mat, pts_tf, _ = _center_and_normalize_points(pts) + mat, pts_tf = _center_and_normalize_points(pts, scaling=scaling) assert xp.all(xp.isnan(mat)) assert xp.all(xp.isnan(pts_tf)) assert mat.shape == (3, 3) assert pts_tf.shape == pts.shape +@pytest.mark.parametrize("xp", [np, cp]) +@pytest.mark.parametrize("scaling", ["rms", "mrs", "raw", "foo"]) +def test_calc_center_normalize(xp, scaling): + if xp == np: + src = cp.asnumpy(SRC) + else: + src = SRC + + if scaling not in ["rms", "mrs", "raw"]: + with pytest.raises(ValueError, match='Unexpected "scaling"'): + _center_and_normalize_points(src, scaling=scaling) + return + + mat = _calc_center_normalize(src, scaling=scaling) + if scaling == "raw": + xp.testing.assert_array_equal(mat, xp.eye(3)) + return + out_pts = _apply_homogeneous(mat, src) + assert xp.allclose(xp.mean(out_pts, axis=0), 0) + if scaling == "rms": + assert xp.isclose(xp.sqrt(xp.mean(out_pts**2)), 1) + elif scaling == "mrs": + scaler = xp.mean(xp.sqrt(xp.sum(out_pts**2, axis=1))) + assert xp.isclose(scaler, np.sqrt(2)) + mat2, normed = _center_and_normalize_points(src, scaling=scaling) + assert_array_equal(mat, mat2) + assert_array_equal(out_pts, normed) + + @pytest.mark.parametrize("xp", [np, cp]) def test_projective_repr(xp): tform = ProjectiveTransform(xp=xp) @@ -827,8 +966,10 @@ def _assert_least_squares(tf, src, dst, xp=cp): assert new_ssq > baseline 
-@pytest.mark.parametrize("xp", [np, cp]) -def test_estimate_affine_3d(xp): +@pytest.mark.parametrize( + "xp, array_like_input", [(np, False), (np, True), (cp, False)] +) +def test_estimate_affine_3d(xp, array_like_input): ndim = 3 src = np.random.random((25, ndim)) * 2 ** np.arange(7, 7 + ndim) src = xp.asarray(src) @@ -842,14 +983,25 @@ def test_estimate_affine_3d(xp): ) tf = AffineTransform(matrix=matrix) dst = tf(src) - dst_noisy = dst + xp.asarray(np.random.random((25, ndim))) - tf2 = AffineTransform(dimensionality=ndim) - assert tf2.estimate(src, dst_noisy) + dst_noisy = dst + xp.random.random((25, ndim)) + if array_like_input: + # list of lists for destination coords + dst = [list(c) for c in dst] + tf2 = AffineTransform.from_estimate(src, dst_noisy) # we check rot/scale/etc more tightly than translation because translation # estimation is on the 1 pixel scale - assert_array_almost_equal(tf2.params[:, :-1], matrix[:, :-1], decimal=2) - assert_array_almost_equal(tf2.params[:, -1], matrix[:, -1], decimal=0) + matrix = xp.asarray(matrix) + xp.testing.assert_array_almost_equal( + tf2.params[:, :-1], matrix[:, :-1], decimal=2 + ) + xp.testing.assert_array_almost_equal( + tf2.params[:, -1], matrix[:, -1], decimal=0 + ) _assert_least_squares(tf2, src, dst_noisy) + tf3 = AffineTransform(dimensionality=ndim) + with pytest.warns(FutureWarning, match="`estimate` is deprecated"): + assert tf3.estimate(src, dst_noisy) + xp.testing.assert_array_equal(tf2.params, tf3.params) def test_fundamental_3d_not_implemented(): @@ -885,6 +1037,29 @@ def test_affine_params_nD_error(): _ = AffineTransform(scale=5, dimensionality=3) +EG_OPS = dict( + scale=(4, 5), shear=(1.4, 1.8), rotation=0.4, translation=(10, 12) +) + + +@pytest.mark.parametrize( + "tform_class, op_order", + ( + (AffineTransform, ("scale", "shear", "rotation", "translation")), + (EuclideanTransform, ("rotation", "translation")), + (SimilarityTransform, ("scale", "rotation", "translation")), + ), +) +def 
test_transform_order(tform_class, op_order): + ops = [(k, EG_OPS[k]) for k in op_order] + part_xforms = [tform_class(**{k: v}) for k, v in ops] + full_xform = tform_class(**dict(ops)) + out = np.eye(3) + for tf in part_xforms: + out = cp.asnumpy(tf.params) @ out + assert np.allclose(cp.asnumpy(full_xform.params), out) + + def test_euler_rotation(): v = [0, 10, 0] angles = np.radians([90, 45, 45]) @@ -893,35 +1068,42 @@ def test_euler_rotation(): assert_array_almost_equal(R @ v, expected, decimal=1) +def _from_matvec(mat, vec, xp): + mat = xp.asarray(mat) + vec = xp.asarray(vec) + d = mat.shape[0] + out = xp.eye(d + 1) + out[:-1, :-1] = mat + out[:-1, -1] = vec + return out + + @pytest.mark.parametrize("xp", [np, cp]) -def test_euclidean_param_defaults(xp): +@pytest.mark.parametrize( + "tform_class", (EuclideanTransform, SimilarityTransform) +) +def test_euclidean_param_defaults(xp, tform_class): # 2D rotation is 0 when only translation is given - tf = EuclideanTransform(translation=(5, 5)) + tf = tform_class(translation=(5, 5), xp=xp) assert tf.params[0, 1] == 0 # off diagonals are 0 when only translation is given - tf = EuclideanTransform(translation=(4, 5, 9), dimensionality=3) + tf = tform_class(translation=(4, 5, 9), dimensionality=3, xp=xp) assert xp.all(tf.params[[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]] == 0) + tf = tform_class(translation=(5, 6, 7, 8), xp=xp) + xp.testing.assert_array_equal( + tf.params, _from_matvec(xp.eye(4), (5, 6, 7, 8), xp) + ) + with pytest.raises(ValueError): + _ = tform_class(translation=(5, 6, 7, 8), dimensionality=3, xp=xp) with pytest.raises(ValueError): - # specifying parameters for D>3 is not supported - _ = EuclideanTransform(translation=(5, 6, 7, 8), dimensionality=4) + _ = tform_class(rotation=(4, 8), xp=xp) with pytest.raises(ValueError): - # incorrect number of angles for given dimensionality - _ = EuclideanTransform(rotation=(4, 8), dimensionality=3) + _ = tform_class(rotation=(4, 8, 2, 4), xp=xp) # translation is 0 when 
rotation is given - tf = EuclideanTransform(rotation=np.pi * cp.arange(3), dimensionality=3) + tf = tform_class(rotation=xp.pi * xp.arange(3), dimensionality=3, xp=xp) assert xp.all(tf.params[:-1, 3] == 0) -@pytest.mark.parametrize("xp", [np, cp]) -def test_similarity_transform_params(xp): - with pytest.raises(ValueError): - _ = SimilarityTransform( - translation=(4, 5, 6, 7), dimensionality=4, xp=xp - ) - tf = SimilarityTransform(scale=4, dimensionality=3, xp=xp) - assert_array_equal(tf(xp.asarray([[1, 1, 1]])), xp.asarray([[4, 4, 4]])) - - @pytest.mark.parametrize("xp", [np, cp]) def test_euler_angle_consistency(xp): angles = xp.random.random((3,)) * 2 * np.pi - np.pi @@ -939,3 +1121,64 @@ def test_2D_only_implementations(xp): _ = tf.rotation with pytest.raises(NotImplementedError): _ = tf.shear + + +@pytest.mark.parametrize( + "tform_class", + [t for t in TRANSFORMS.values() if t is not PiecewiseAffineTransform], +) +def test_identity(tform_class): + rng = np.random.default_rng(0) + allows_nd = tform_class in HMAT_TFORMS_ND + dims = (2, 3, 4) if allows_nd else (2,) + for ndim in dims: + src = cp.asarray(rng.normal(size=(10, ndim))) + t = tform_class.identity(ndim) + if isinstance(t, FundamentalMatrixTransform): + out = cp.concatenate((src, cp.ones((len(src), 1))), axis=1) + else: + out = src + assert_array_almost_equal(t(src), out) + + +@pytest.mark.parametrize("xp", [np, cp]) +@pytest.mark.parametrize( + "tform_class", + [t for t in TRANSFORMS.values() if t is not PiecewiseAffineTransform], +) +def test_identity_array_module(xp, tform_class): + t = tform_class.identity(2, xp=xp) + assert type(t.params) is xp.ndarray + + +@pytest.mark.parametrize("xp", [np, cp]) +@pytest.mark.parametrize( + "tform_class", + [ + ProjectiveTransform, + AffineTransform, + EuclideanTransform, + SimilarityTransform, + PolynomialTransform, + ], +) +def test_from_estimate_array_module(xp, tform_class): + src = xp.array([[0, 0], [0, 50], [50, 50], [50, 0]], dtype=float) + dst = 
xp.array([[0, 0], [0, 50], [50, 50], [50, 0]], dtype=float) + if tform_class is PolynomialTransform: + tf = tform_class.from_estimate(src, dst, order=2) + else: + tf = tform_class.from_estimate(src, dst) + assert tf + assert type(tf.params) is xp.ndarray + + +@pytest.mark.parametrize("tform_class", HMAT_TFORMS) +def test_kw_only_params(tform_class): + with pytest.raises(TypeError): + tform_class(None, None) + + +def test_kw_only_emt(): + with pytest.raises(TypeError): + EssentialMatrixTransform(None) diff --git a/python/cucim/src/cucim/skimage/util/_map_array.py b/python/cucim/src/cucim/skimage/util/_map_array.py index 889573829..23602bd07 100644 --- a/python/cucim/src/cucim/skimage/util/_map_array.py +++ b/python/cucim/src/cucim/skimage/util/_map_array.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. # SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -39,7 +39,7 @@ def map_array(input_arr, input_vals, output_vals, out=None): The values to map from. output_vals : array, shape (K,) The values to map to. - out: array, same shape as `input_arr` + out : array, same shape as `input_arr` The output array. Will be created if not provided. It should have the same dtype as `output_vals`. diff --git a/python/cucim/src/cucim/skimage/util/noise.py b/python/cucim/src/cucim/skimage/util/noise.py index 9c8f5be16..be52e4149 100644 --- a/python/cucim/src/cucim/skimage/util/noise.py +++ b/python/cucim/src/cucim/skimage/util/noise.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -95,10 +95,10 @@ def random_noise(image, mode="gaussian", rng=None, clip=True, **kwargs): Note: `cupy.random.Generator` is not yet fully supported. Please use an integer seed instead. clip : bool, optional - If True (default), the output will be clipped after noise applied - for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is - needed to maintain the proper image data range. If False, clipping - is not applied, and the output may extend beyond the range [-1, 1]. + If True (default), the output will be clipped after noise is applied. + This may be needed to maintain the proper image data range. + If False, clipping is not applied, and the output may extend beyond + the range [-1, 1]. mean : float, optional Mean of random distribution. Used in 'gaussian' and 'speckle'. Default : 0. @@ -237,6 +237,7 @@ def random_noise(image, mode="gaussian", rng=None, clip=True, **kwargs): rng=rng, amount=kwargs["amount"], salt_vs_pepper=1.0, + clip=False, ) elif mode == "pepper": @@ -247,6 +248,7 @@ def random_noise(image, mode="gaussian", rng=None, clip=True, **kwargs): rng=rng, amount=kwargs["amount"], salt_vs_pepper=0.0, + clip=False, ) elif mode == "s&p": diff --git a/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py b/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py index f251ee507..39be97f7d 100644 --- a/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py +++ b/python/cucim/src/cucim/skimage/util/tests/test_random_noise.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: 2009-2022 the scikit-image team -# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2021-2026, NVIDIA CORPORATION. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause import cupy as cp @@ -100,6 +100,41 @@ def test_salt_and_pepper(): assert 0.18 < saltmask.sum() / peppermask.sum() < 0.35 +def test_clipping_application_consistency(): + """Ensure that clipping is not applied if 'clip' set to False regardless of 'mode' argument""" + + img = ( + cp.random.rand(1, 5, 5) * 10 - 5 + ) # image array with c,h,w shape semantics and range [-5,5] + # we have float values outside of [-1,1], so we presumably don't want to clip + + arr_out_pepper = random_noise( + img, mode="pepper", rng=42, clip=False, amount=0.5 + ) + arr_out_pepper2 = random_noise( + img, mode="s&p", salt_vs_pepper=0, rng=42, clip=False, amount=0.5 + ) + + arr_out_salt = random_noise( + img, mode="salt", rng=42, clip=False, amount=0.5 + ) + arr_out_salt2 = random_noise( + img, mode="s&p", salt_vs_pepper=1, rng=42, clip=False, amount=0.5 + ) + + # check that clipping has been consistently applied + assert cp.all(arr_out_pepper == arr_out_pepper2) + assert cp.all(arr_out_salt == arr_out_salt2) + + # check clipping to range 0->1 didn't happen + assert cp.max(arr_out_pepper) != 1 + assert cp.min(arr_out_pepper) != 0 + + # check clipping to range 0->1 didn't happen + assert cp.max(arr_out_salt) != 1 + assert cp.min(arr_out_salt) != 0 + + def test_gaussian(): seed = 42 data = cp.zeros((128, 128)) + 0.5