Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 2 additions & 6 deletions onedal/_device_offload.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
from sklearn import get_config

from ._config import _get_config
from .datatypes import copy_to_dpnp, copy_to_usm, dlpack_to_numpy
from .datatypes import copy_to_dpnp, dlpack_to_numpy
from .utils import _sycl_queue_manager as QM
from .utils._array_api import _asarray, _get_sycl_namespace, _is_numpy_namespace
from .utils._third_party import is_dpnp_ndarray
Expand Down Expand Up @@ -159,11 +159,7 @@ def wrapper_impl(*args, **kwargs):
result = invoke_func(self, *hostargs, **hostkwargs)

if queue and hasattr(data, "__sycl_usm_array_interface__"):
return (
copy_to_dpnp(queue, result)
if is_dpnp_ndarray(data)
else copy_to_usm(queue, result)
)
return copy_to_dpnp(queue, result)

if get_config().get("transform_output") in ("default", None):
input_array_api = getattr(data, "__array_namespace__", lambda: None)()
Expand Down
3 changes: 1 addition & 2 deletions onedal/datatypes/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,10 @@

from ._data_conversion import from_table, return_type_constructor, to_table
from ._dlpack import dlpack_to_numpy, get_torch_queue
from ._sycl_usm import copy_to_dpnp, copy_to_usm
from ._sycl_usm import copy_to_dpnp

__all__ = [
"copy_to_dpnp",
"copy_to_usm",
"dlpack_to_numpy",
"from_table",
"get_torch_queue",
Expand Down
22 changes: 4 additions & 18 deletions onedal/datatypes/_data_conversion.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,12 @@
# limitations under the License.
# ==============================================================================

import warnings

import numpy as np
import scipy.sparse as sp

from onedal import _default_backend as backend

from ..utils._third_party import is_dpctl_tensor, is_dpnp_ndarray, lazy_import
from ..utils._third_party import is_dpnp_ndarray, lazy_import


def _apply_and_pass(func, *args, **kwargs):
Expand Down Expand Up @@ -116,19 +114,7 @@ def return_type_constructor(array):
xp = array.__array_namespace__()
# array api support added in dpnp starting in 0.19, will fail for
# older versions
if is_dpctl_tensor(array):
warnings.warn(
"dpctl tensors are deprecated and support for them in "
"scikit-learn-intelex will be removed in 2026.0.0. "
"Consider using dpnp arrays instead.",
FutureWarning,
)
func = lambda x: (
xp.asarray(x)
if hasattr(x, "__sycl_usm_array_interface__")
else xp.asarray(backend.from_table(x), device=device)
)
elif is_dpnp_ndarray(array):
if is_dpnp_ndarray(array):
func = lambda x: (
Comment thread
ethanglaser marked this conversation as resolved.
xp.asarray(xp.as_usm_ndarray(x))
if hasattr(x, "__sycl_usm_array_interface__")
Expand All @@ -152,8 +138,8 @@ def return_type_constructor(array):
def from_table(*args, like=None):
"""Create 2 dimensional arrays from oneDAL tables.

oneDAL tables are converted to numpy ndarrays, dpctl tensors, dpnp
ndarrays, or array API standard arrays of designated type.
oneDAL tables are converted to numpy ndarrays, dpnp ndarrays,
or array API standard arrays of designated type.

Parameters
----------
Expand Down
2 changes: 1 addition & 1 deletion onedal/datatypes/_dlpack.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def dlpack_to_numpy(obj):
# convert to numpy
try:
# Some frameworks implement an __array__ method just to
# throw a RuntimeError when used (array_api_strict, dpctl),
# throw a RuntimeError or a TypeError when used (array_api_strict)
# rather than an AttributeError,
# therefore a try catch is necessary (logic is essentially a
# getattr call + some)
Expand Down
29 changes: 5 additions & 24 deletions onedal/datatypes/_sycl_usm.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,41 +22,22 @@
from ..utils._third_party import lazy_import


@lazy_import("dpctl.memory", "dpctl.tensor")
def _array_to_usm(memory, tensor, queue, array):
@lazy_import("dpnp")
def _to_dpnp(dpnp, queue, array):
try:
mem = memory.MemoryUSMDevice(array.nbytes, queue=queue)
mem.copy_from_host(array.tobytes())
return tensor.usm_ndarray(array.shape, array.dtype, buffer=mem)
return dpnp.asarray(array, usm_type="device", sycl_queue=queue)
except ValueError as e:
# ValueError will raise if device does not support the dtype
# retry with float32 (needed for fp16 and fp64 support issues)
# try again as float32; if it is already a float32, just re-raise the error.
if array.dtype == np.float32:
raise e
return _array_to_usm(queue, array.astype(np.float32))


@lazy_import("dpnp", "dpctl.tensor")
def _to_dpnp(dpnp, tensor, array):
if isinstance(array, tensor.usm_ndarray):
return dpnp.array(array, copy=False)
else:
return array


def copy_to_usm(queue, array):
if hasattr(array, "tobytes"):
return _array_to_usm(queue, array)
else:
if isinstance(array, Iterable) and not sp.issparse(array):
array = [copy_to_usm(queue, i) for i in array]
return array
return _to_dpnp(queue, array.astype(np.float32))


def copy_to_dpnp(queue, array):
if hasattr(array, "tobytes"):
return _to_dpnp(_array_to_usm(queue, array))
return _to_dpnp(queue, array)
else:
if isinstance(array, Iterable) and not sp.issparse(array):
array = [copy_to_dpnp(queue, i) for i in array]
Expand Down
23 changes: 6 additions & 17 deletions onedal/datatypes/tests/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@

if dpctl_available:
import dpctl
from dpctl.tensor import usm_ndarray

def _get_sycl_queue(syclobj):
if hasattr(syclobj, "_get_capsule"):
Expand All @@ -35,22 +34,12 @@ def _get_sycl_queue(syclobj):
return dpctl.SyclQueue(syclobj)

def _assert_tensor_attr(actual, desired, order):
"""Check attributes of two given USM tensors."""
is_usm_tensor = (
lambda x: dpnp_available
and isinstance(x, dpnp.ndarray)
or isinstance(x, usm_ndarray)
)
assert is_usm_tensor(actual)
assert is_usm_tensor(desired)
# dpctl.tensor is the dpnp.ndarrays's core tensor structure along
# with advanced device management. Convert dpnp to dpctl.tensor with zero copy.
get_tensor = lambda x: (
x.get_array() if dpnp_available and isinstance(x, dpnp.ndarray) else x
)
# Now DPCtl tensors
actual = get_tensor(actual)
desired = get_tensor(desired)
"""Check attributes of two given USM arrays."""
assert dpnp_available and isinstance(actual, dpnp.ndarray)
assert dpnp_available and isinstance(desired, dpnp.ndarray)
# Convert dpnp to underlying usm_ndarray with zero copy.
actual = actual.get_array()
desired = desired.get_array()

assert actual.shape == desired.shape
assert actual.strides == desired.strides
Expand Down
39 changes: 14 additions & 25 deletions onedal/datatypes/tests/test_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,9 +246,7 @@ def test_conversion_to_table(dtype):
not backend.is_dpc,
reason="__sycl_usm_array_interface__ support requires DPC backend.",
)
@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("dpctl,dpnp", "cpu,gpu")
)
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("dpnp", "cpu,gpu"))
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.int32, np.int64])
def test_input_zero_copy_sycl_usm(dataframe, queue, order, dtype):
Expand Down Expand Up @@ -280,9 +278,7 @@ def test_input_zero_copy_sycl_usm(dataframe, queue, order, dtype):
not backend.is_dpc,
reason="__sycl_usm_array_interface__ support requires DPC backend.",
)
@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("dpctl,dpnp", "cpu,gpu")
)
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("dpnp", "cpu,gpu"))
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("data_shape", data_shapes)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
Expand Down Expand Up @@ -340,15 +336,15 @@ def test_table_conversions_sycl_usm(dataframe, queue, order, data_shape, dtype):


@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("numpy,dpctl,dpnp,array_api", "cpu,gpu")
"dataframe,queue", get_dataframes_and_queues("numpy,dpnp,array_api", "cpu,gpu")
)
@pytest.mark.parametrize("data_shape", unsupported_data_shapes)
def test_interop_invalid_shape(dataframe, queue, data_shape):
X = np.zeros(data_shape)
X = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)

expected_err_msg = r"Input array has wrong dimensionality \(must be 2d\)."
if dataframe in "dpctl,dpnp":
if dataframe == "dpnp":
expected_err_msg = (
"Unable to convert from SUA interface: only 1D & 2D tensors are allowed"
)
Expand All @@ -357,7 +353,7 @@ def test_interop_invalid_shape(dataframe, queue, data_shape):


@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("dpctl,dpnp,array_api", "cpu,gpu")
"dataframe,queue", get_dataframes_and_queues("dpnp,array_api", "cpu,gpu")
)
@pytest.mark.parametrize(
"dtype",
Expand All @@ -370,8 +366,7 @@ def test_interop_invalid_shape(dataframe, queue, data_shape):
def test_interop_unsupported_dtypes(dataframe, queue, dtype):
# SUA interface interoperability is supported only for oneDAL-supported dtypes
# for input data: int32, int64, float32, float64.
# Checking some common dtypes supported by dpctl, dpnp for exception
# raise.
# Checking some common dtypes supported by dpnp for exception raise.
X = np.zeros((10, 20), dtype=dtype)
X = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
expected_err_msg = r"Found unsupported (array|tensor) type"
Expand All @@ -381,10 +376,10 @@ def test_interop_unsupported_dtypes(dataframe, queue, dtype):


@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("numpy,dpctl,dpnp", "cpu,gpu")
"dataframe,queue", get_dataframes_and_queues("numpy,dpnp", "cpu,gpu")
)
def test_to_table_non_contiguous_input(dataframe, queue):
if dataframe in "dpnp,dpctl" and not backend.is_dpc:
if dataframe == "dpnp" and not backend.is_dpc:
pytest.skip("__sycl_usm_array_interface__ support requires DPC backend.")
X, _ = np.mgrid[:10, :10]
X = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
Expand All @@ -398,9 +393,7 @@ def test_to_table_non_contiguous_input(dataframe, queue):
backend.is_dpc,
reason="Required check should be done if no DPC backend.",
)
@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("dpctl,dpnp", "cpu,gpu")
)
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("dpnp", "cpu,gpu"))
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_interop_if_no_dpc_backend_sycl_usm(dataframe, queue, dtype):
X = np.zeros((10, 20), dtype=dtype)
Expand Down Expand Up @@ -524,9 +517,7 @@ def test_basic_ndarray_types_numpy(X):
test_non_array(np.asarray(X), None)


@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("dpctl,numpy", "cpu,gpu")
)
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("numpy", "cpu,gpu"))
@pytest.mark.parametrize("can_copy", [True, False])
def test_to_table_non_contiguous_input_dlpack(dataframe, queue, can_copy):
X, _ = np.mgrid[:10, :10]
Expand All @@ -551,9 +542,7 @@ def test_to_table_non_contiguous_input_dlpack(dataframe, queue, can_copy):
to_table(X_tens)


@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("dpctl,numpy", "cpu,gpu")
)
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("numpy", "cpu,gpu"))
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("data_shape", data_shapes)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
Expand All @@ -578,7 +567,7 @@ def test_table_conversions_dlpack(dataframe, queue, order, data_shape, dtype):


@pytest.mark.parametrize(
"dataframe,queue", get_dataframes_and_queues("numpy,dpctl,array_api", "cpu,gpu")
"dataframe,queue", get_dataframes_and_queues("numpy,array_api", "cpu,gpu")
)
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("data_shape", data_shapes)
Expand Down Expand Up @@ -621,7 +610,7 @@ def test_table___dlpack__(dataframe, queue, order, data_shape, dtype):
@pytest.mark.skipif(
not hasattr(np, "from_dlpack"), reason="no dlpack support in installed numpy"
)
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("dpctl", "cpu,gpu"))
@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues("dpnp", "cpu,gpu"))
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("data_shape", data_shapes)
@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.int32, np.int64])
Expand Down Expand Up @@ -664,7 +653,7 @@ def test_table_writable_dlpack(queue):
"""Test if __dlpack__ attribute can be properly consumed by moving data
to host from a SYCL device.
"""
xp = pytest.importorskip("dpctl.tensor")
xp = pytest.importorskip("dpnp")
X = xp.eye(5, 8, dtype=xp.float32, device=queue)
X.flags["W"] = False
X_table = to_table(X)
Expand Down
17 changes: 2 additions & 15 deletions onedal/tests/utils/_dataframes_support.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,6 @@

from sklearnex import get_config

from ...utils._third_party import dpctl_available

if dpctl_available:
import dpctl.tensor as dpt

try:
import dpnp

Expand Down Expand Up @@ -55,7 +50,7 @@
from onedal.tests.utils._device_selection import get_queues

test_frameworks = os.environ.get(
"ONEDAL_PYTEST_FRAMEWORKS", "numpy,pandas,dpnp,dpctl,array_api"
"ONEDAL_PYTEST_FRAMEWORKS", "numpy,pandas,dpnp,array_api"
)


Expand Down Expand Up @@ -113,8 +108,6 @@ def get_df_and_q(dataframe: str):
df_and_q.append(pytest.param(dataframe, queue.values[0], id=id))
return df_and_q

if dpctl_available and "dpctl" in dataframe_filter_:
dataframes_and_queues.extend(get_df_and_q("dpctl"))
if dpnp_available and "dpnp" in dataframe_filter_:
dataframes_and_queues.extend(get_df_and_q("dpnp"))
if (
Expand All @@ -131,8 +124,6 @@ def _as_numpy(obj, *args, **kwargs):
"""Converted input object to numpy.ndarray format."""
if dpnp_available and isinstance(obj, dpnp.ndarray):
return obj.asnumpy(*args, **kwargs)
if dpctl_available and isinstance(obj, dpt.usm_ndarray):
return dpt.to_numpy(obj, *args, **kwargs)
if isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series):
return obj.to_numpy(*args, **kwargs)
if sp.issparse(obj):
Expand Down Expand Up @@ -166,12 +157,8 @@ def _convert_to_dataframe(obj, sycl_queue=None, target_df=None, *args, **kwargs)
return dpnp.asarray(
obj, usm_type="device", sycl_queue=sycl_queue, *args, **kwargs
)
elif target_df == "dpctl":
# DPCtl tensor.
return dpt.asarray(obj, usm_type="device", sycl_queue=sycl_queue, *args, **kwargs)
elif target_df in array_api_modules:
# Array API input other than DPNP ndarray, DPCtl tensor or
# Numpy ndarray.
# Array API input other than DPNP ndarray or Numpy ndarray.

xp = array_api_modules[target_df]
return xp.asarray(obj)
Expand Down
15 changes: 1 addition & 14 deletions onedal/utils/_array_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@

"""Tools to support array_api."""

import warnings
from collections.abc import Iterable
from functools import lru_cache

Expand Down Expand Up @@ -66,19 +65,7 @@ def _is_numpy_namespace(xp):

@lru_cache(100)
def _cls_to_sycl_namespace(cls):
# use caching to minimize imports, derived from array_api_compat
if _is_subclass_fast(cls, "dpctl.tensor", "usm_ndarray"):
import dpctl.tensor as dpt

warnings.warn(
"dpctl tensors are deprecated and support for them in "
"scikit-learn-intelex will be removed in 2026.0.0. "
"Consider using dpnp arrays instead.",
FutureWarning,
)

return dpt
elif _is_subclass_fast(cls, "dpnp", "ndarray"):
if _is_subclass_fast(cls, "dpnp", "ndarray"):
import dpnp

return dpnp
Expand Down
Loading
Loading