diff --git a/dpctl/tensor/__init__.py b/dpctl/tensor/__init__.py index 42b0cf49f5..2e720cba92 100644 --- a/dpctl/tensor/__init__.py +++ b/dpctl/tensor/__init__.py @@ -94,17 +94,21 @@ from ._elementwise_funcs import ( abs, add, + conj, cos, divide, equal, exp, expm1, + imag, isfinite, isinf, isnan, log, log1p, multiply, + proj, + real, sin, sqrt, subtract, @@ -188,14 +192,18 @@ "inf", "abs", "add", + "conj", "cos", "exp", "expm1", + "imag", "isinf", "isnan", "isfinite", "log", "log1p", + "proj", + "real", "sin", "sqrt", "divide", diff --git a/dpctl/tensor/_elementwise_funcs.py b/dpctl/tensor/_elementwise_funcs.py index 21bb0440ee..cb24929b76 100644 --- a/dpctl/tensor/_elementwise_funcs.py +++ b/dpctl/tensor/_elementwise_funcs.py @@ -113,7 +113,29 @@ # FIXME: implement U09 # U10: ==== CONJ (x) -# FIXME: implement U10 +_conj_docstring = """ +conj(x, out=None, order='K') + +Computes conjugate of each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have numeric data type. + out ({None, usm_ndarray}, optional): + Output array to populate. + Array have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". +Returns: + usm_narray: + An array containing the element-wise conjugate values. The data type + of the returned array is determined by the Type Promotion Rules. +""" + +conj = UnaryElementwiseFunc( + "conj", ti._conj_result_type, ti._conj, _conj_docstring +) # U11: ==== COS (x) _cos_docstring = """ @@ -257,7 +279,30 @@ # FIXME: implement B12 # U16: ==== IMAG (x) -# FIXME: implement U16 +_imag_docstring = """ +imag(x, out=None, order='K') + +Computes imaginary part of each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have numeric data type. + out ({None, usm_ndarray}, optional): + Output array to populate. + Array have the correct shape and the expected data type. 
+ order ("C","F","A","K", optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". +Returns: + usm_narray: + An array containing the element-wise imaginary component of input. + The data type of the returned array is determined + by the Type Promotion Rules. +""" + +imag = UnaryElementwiseFunc( + "imag", ti._imag_result_type, ti._imag, _imag_docstring +) # U17: ==== ISFINITE (x) _isfinite_docstring_ = """ @@ -443,8 +488,55 @@ # B21: ==== POW (x1, x2) # FIXME: implement B21 +# U??: ==== PROJ (x) +_proj_docstring = """ +proj(x, out=None, order='K') + +Computes projection of each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have numeric data type. + out ({None, usm_ndarray}, optional): + Output array to populate. + Array have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". +Returns: + usm_narray: + An array containing the element-wise projection. The data + type of the returned array is determined by the Type Promotion Rules. +""" + +proj = UnaryElementwiseFunc( + "proj", ti._proj_result_type, ti._proj, _proj_docstring +) + # U27: ==== REAL (x) -# FIXME: implement U27 +_real_docstring = """ +real(x, out=None, order='K') + +Computes real part of each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have numeric data type. + out ({None, usm_ndarray}, optional): + Output array to populate. + Array have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the newly output array, if parameter `out` is `None`. + Default: "K". +Returns: + usm_narray: + An array containing the element-wise real component of input. The data + type of the returned array is determined by the Type Promotion Rules. 
+""" + +real = UnaryElementwiseFunc( + "real", ti._real_result_type, ti._real, _real_docstring +) # B22: ==== REMAINDER (x1, x2) # FIXME: implement B22 diff --git a/dpctl/tensor/libtensor/include/kernels/elementwise_functions/conj.hpp b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/conj.hpp new file mode 100644 index 0000000000..24c5a128d0 --- /dev/null +++ b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/conj.hpp @@ -0,0 +1,194 @@ +//=== conj.hpp - Unary function CONJ ------ +//*-C++-*--/===// +// +// Data Parallel Control (dpctl) +// +// Copyright 2020-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of CONJ(x) function. 
+//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include + +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" +#include + +namespace dpctl +{ +namespace tensor +{ +namespace kernels +{ +namespace conj +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; + +template struct ConjFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::false_type; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) + { + if constexpr (is_complex::value) { + return std::conj(in); + } + else { + if constexpr (!std::is_same_v) + static_assert(std::is_same_v); + return in; + } + } +}; + +template +using ConjContigFunctor = elementwise_common:: + UnaryContigFunctor, vec_sz, n_vecs>; + +template +using ConjStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template struct ConjOutputType +{ + using value_type = typename std::disjunction< // disjunction is C++17 + // feature, supported by DPC++ + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; +}; + +template +class 
conj_contig_kernel; + +template +sycl::event conj_contig_impl(sycl::queue exec_q, + size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + return elementwise_common::unary_contig_impl< + argTy, ConjOutputType, ConjContigFunctor, conj_contig_kernel>( + exec_q, nelems, arg_p, res_p, depends); +} + +template struct ConjContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = conj_contig_impl; + return fn; + } + } +}; + +template struct ConjTypeMapFactory +{ + /*! @brief get typeid for output type of std::conj(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename ConjOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template class conj_strided_kernel; + +template +sycl::event +conj_strided_impl(sycl::queue exec_q, + size_t nelems, + int nd, + const py::ssize_t *shape_and_strides, + const char *arg_p, + py::ssize_t arg_offset, + char *res_p, + py::ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, ConjOutputType, ConjStridedFunctor, conj_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template struct ConjStridedFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = conj_strided_impl; + return fn; + } + } +}; + +} // namespace conj +} // namespace kernels +} // namespace tensor +} // namespace dpctl diff --git a/dpctl/tensor/libtensor/include/kernels/elementwise_functions/imag.hpp b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/imag.hpp new file mode 100644 index 0000000000..6c85de5561 --- /dev/null +++ b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/imag.hpp @@ -0,0 +1,193 @@ +//=== imag.hpp - Unary function IMAG 
------ +//*-C++-*--/===// +// +// Data Parallel Control (dpctl) +// +// Copyright 2020-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of IMAG(x) function. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include + +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" +#include + +namespace dpctl +{ +namespace tensor +{ +namespace kernels +{ +namespace imag +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; + +template struct ImagFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::false_type; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) + { + if constexpr (is_complex::value) { + return std::imag(in); + } + else { + static_assert(std::is_same_v); + return 
resT{0}; + } + } +}; + +template +using ImagContigFunctor = elementwise_common:: + UnaryContigFunctor, vec_sz, n_vecs>; + +template +using ImagStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template struct ImagOutputType +{ + using value_type = typename std::disjunction< // disjunction is C++17 + // feature, supported by DPC++ + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, float>, + td_ns::TypeMapResultEntry, double>, + td_ns::DefaultResultEntry>::result_type; +}; + +template +class imag_contig_kernel; + +template +sycl::event imag_contig_impl(sycl::queue exec_q, + size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + return elementwise_common::unary_contig_impl< + argTy, ImagOutputType, ImagContigFunctor, imag_contig_kernel>( + exec_q, nelems, arg_p, res_p, depends); +} + +template struct ImagContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = imag_contig_impl; + return fn; + } + } +}; + +template struct ImagTypeMapFactory +{ + /*! 
@brief get typeid for output type of std::imag(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename ImagOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template class imag_strided_kernel; + +template +sycl::event +imag_strided_impl(sycl::queue exec_q, + size_t nelems, + int nd, + const py::ssize_t *shape_and_strides, + const char *arg_p, + py::ssize_t arg_offset, + char *res_p, + py::ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, ImagOutputType, ImagStridedFunctor, imag_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template struct ImagStridedFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = imag_strided_impl; + return fn; + } + } +}; + +} // namespace imag +} // namespace kernels +} // namespace tensor +} // namespace dpctl diff --git a/dpctl/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp new file mode 100644 index 0000000000..c1b89cebec --- /dev/null +++ b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp @@ -0,0 +1,183 @@ +//=== proj.hpp - Unary function CONJ ------ +//*-C++-*--/===// +// +// Data Parallel Control (dpctl) +// +// Copyright 2020-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of PROJ(x) function. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" +#include + +namespace dpctl +{ +namespace tensor +{ +namespace kernels +{ +namespace proj +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; + +template struct ProjFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::false_type; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::false_type; + + resT operator()(const argT &in) + { + using realT = typename argT::value_type; + const realT x = std::real(in); + const realT y = std::imag(in); + + if (std::isinf(x) || std::isinf(y)) { + const realT res_im = std::copysign(0.0, y); + return resT{std::numeric_limits::infinity(), res_im}; + } + return in; + } +}; + +template +using ProjContigFunctor = elementwise_common:: + UnaryContigFunctor, vec_sz, n_vecs>; + +template +using ProjStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template struct ProjOutputType +{ + using value_type = typename std::disjunction< // disjunction is C++17 + // feature, supported by DPC++ + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; +}; 
+ +template +class proj_contig_kernel; + +template +sycl::event proj_contig_impl(sycl::queue exec_q, + size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + return elementwise_common::unary_contig_impl< + argTy, ProjOutputType, ProjContigFunctor, proj_contig_kernel>( + exec_q, nelems, arg_p, res_p, depends); +} + +template struct ProjContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = proj_contig_impl; + return fn; + } + } +}; + +template struct ProjTypeMapFactory +{ + /*! @brief get typeid for output type of std::proj(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename ProjOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template class proj_strided_kernel; + +template +sycl::event +proj_strided_impl(sycl::queue exec_q, + size_t nelems, + int nd, + const py::ssize_t *shape_and_strides, + const char *arg_p, + py::ssize_t arg_offset, + char *res_p, + py::ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, ProjOutputType, ProjStridedFunctor, proj_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template struct ProjStridedFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = proj_strided_impl; + return fn; + } + } +}; + +} // namespace proj +} // namespace kernels +} // namespace tensor +} // namespace dpctl diff --git a/dpctl/tensor/libtensor/include/kernels/elementwise_functions/real.hpp b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/real.hpp new file mode 100644 index 0000000000..43e256913f --- /dev/null +++ b/dpctl/tensor/libtensor/include/kernels/elementwise_functions/real.hpp @@ -0,0 +1,193 @@ +//=== real.hpp - 
Unary function REAL ------ +//*-C++-*--/===// +// +// Data Parallel Control (dpctl) +// +// Copyright 2020-2023 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of REAL(x) function. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include + +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" +#include + +namespace dpctl +{ +namespace tensor +{ +namespace kernels +{ +namespace real +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; + +template struct RealFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::false_type; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) + { + if constexpr (is_complex::value) { + return std::real(in); + } + else { + 
static_assert(std::is_same_v); + return in; + } + } +}; + +template +using RealContigFunctor = elementwise_common:: + UnaryContigFunctor, vec_sz, n_vecs>; + +template +using RealStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template struct RealOutputType +{ + using value_type = typename std::disjunction< // disjunction is C++17 + // feature, supported by DPC++ + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, float>, + td_ns::TypeMapResultEntry, double>, + td_ns::DefaultResultEntry>::result_type; +}; + +template +class real_contig_kernel; + +template +sycl::event real_contig_impl(sycl::queue exec_q, + size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + return elementwise_common::unary_contig_impl< + argTy, RealOutputType, RealContigFunctor, real_contig_kernel>( + exec_q, nelems, arg_p, res_p, depends); +} + +template struct RealContigFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = real_contig_impl; + return fn; + } + } +}; + +template struct RealTypeMapFactory +{ + /*! 
@brief get typeid for output type of std::real(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename RealOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template class real_strided_kernel; + +template +sycl::event +real_strided_impl(sycl::queue exec_q, + size_t nelems, + int nd, + const py::ssize_t *shape_and_strides, + const char *arg_p, + py::ssize_t arg_offset, + char *res_p, + py::ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, RealOutputType, RealStridedFunctor, real_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template struct RealStridedFactory +{ + fnT get() + { + if constexpr (std::is_same_v::value_type, + void>) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = real_strided_impl; + return fn; + } + } +}; + +} // namespace real +} // namespace kernels +} // namespace tensor +} // namespace dpctl diff --git a/dpctl/tensor/libtensor/source/elementwise_functions.cpp b/dpctl/tensor/libtensor/source/elementwise_functions.cpp index 58e45483e9..88597512bc 100644 --- a/dpctl/tensor/libtensor/source/elementwise_functions.cpp +++ b/dpctl/tensor/libtensor/source/elementwise_functions.cpp @@ -34,16 +34,20 @@ #include "kernels/elementwise_functions/abs.hpp" #include "kernels/elementwise_functions/add.hpp" +#include "kernels/elementwise_functions/conj.hpp" #include "kernels/elementwise_functions/cos.hpp" #include "kernels/elementwise_functions/equal.hpp" #include "kernels/elementwise_functions/exp.hpp" #include "kernels/elementwise_functions/expm1.hpp" +#include "kernels/elementwise_functions/imag.hpp" #include "kernels/elementwise_functions/isfinite.hpp" #include "kernels/elementwise_functions/isinf.hpp" #include "kernels/elementwise_functions/isnan.hpp" #include "kernels/elementwise_functions/log.hpp" #include 
"kernels/elementwise_functions/log1p.hpp" #include "kernels/elementwise_functions/multiply.hpp" +#include "kernels/elementwise_functions/proj.hpp" +#include "kernels/elementwise_functions/real.hpp" #include "kernels/elementwise_functions/sin.hpp" #include "kernels/elementwise_functions/sqrt.hpp" #include "kernels/elementwise_functions/subtract.hpp" @@ -303,7 +307,35 @@ namespace impl // U10: ==== CONJ (x) namespace impl { -// FIXME: add code for U10 + +namespace conj_fn_ns = dpctl::tensor::kernels::conj; + +static unary_contig_impl_fn_ptr_t conj_contig_dispatch_vector[td_ns::num_types]; +static int conj_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + conj_strided_dispatch_vector[td_ns::num_types]; + +void populate_conj_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = conj_fn_ns; + + using fn_ns::ConjContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(conj_contig_dispatch_vector); + + using fn_ns::ConjStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(conj_strided_dispatch_vector); + + using fn_ns::ConjTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(conj_output_typeid_vector); +} } // namespace impl // U11: ==== COS (x) @@ -552,7 +584,35 @@ namespace impl // U16: ==== IMAG (x) namespace impl { -// FIXME: add code for U16 + +namespace imag_fn_ns = dpctl::tensor::kernels::imag; + +static unary_contig_impl_fn_ptr_t imag_contig_dispatch_vector[td_ns::num_types]; +static int imag_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + imag_strided_dispatch_vector[td_ns::num_types]; + +void populate_imag_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = imag_fn_ns; + + using fn_ns::ImagContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(imag_contig_dispatch_vector); + + using fn_ns::ImagStridedFactory; + DispatchVectorBuilder + dvb2; + 
dvb2.populate_dispatch_vector(imag_strided_dispatch_vector); + + using fn_ns::ImagTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(imag_output_typeid_vector); +} } // namespace impl // U17: ==== ISFINITE (x) @@ -879,10 +939,72 @@ namespace impl // FIXME: add code for B21 } // namespace impl +// U??: ==== PROJ (x) +namespace impl +{ + +namespace proj_fn_ns = dpctl::tensor::kernels::proj; + +static unary_contig_impl_fn_ptr_t proj_contig_dispatch_vector[td_ns::num_types]; +static int proj_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + proj_strided_dispatch_vector[td_ns::num_types]; + +void populate_proj_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = proj_fn_ns; + + using fn_ns::ProjContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(proj_contig_dispatch_vector); + + using fn_ns::ProjStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(proj_strided_dispatch_vector); + + using fn_ns::ProjTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(proj_output_typeid_vector); +} +} // namespace impl + // U27: ==== REAL (x) namespace impl { -// FIXME: add code for U27 + +namespace real_fn_ns = dpctl::tensor::kernels::real; + +static unary_contig_impl_fn_ptr_t real_contig_dispatch_vector[td_ns::num_types]; +static int real_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + real_strided_dispatch_vector[td_ns::num_types]; + +void populate_real_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = real_fn_ns; + + using fn_ns::RealContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(real_contig_dispatch_vector); + + using fn_ns::RealStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(real_strided_dispatch_vector); + + using fn_ns::RealTypeMapFactory; + DispatchVectorBuilder dvb3; + 
dvb3.populate_dispatch_vector(real_output_typeid_vector); +} } // namespace impl // B22: ==== REMAINDER (x1, x2) @@ -1185,7 +1307,26 @@ void init_elementwise_functions(py::module_ m) // FIXME: // U10: ==== CONJ (x) - // FIXME: + { + impl::populate_conj_dispatch_vectors(); + using impl::conj_contig_dispatch_vector; + using impl::conj_output_typeid_vector; + using impl::conj_strided_dispatch_vector; + + auto conj_pyapi = [&](arrayT src, arrayT dst, sycl::queue exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, conj_output_typeid_vector, + conj_contig_dispatch_vector, conj_strided_dispatch_vector); + }; + m.def("_conj", conj_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto conj_result_type_pyapi = [&](py::dtype dtype) { + return py_unary_ufunc_result_type(dtype, conj_output_typeid_vector); + }; + m.def("_conj_result_type", conj_result_type_pyapi); + } // U11: ==== COS (x) { @@ -1351,7 +1492,26 @@ void init_elementwise_functions(py::module_ m) // FIXME: // U16: ==== IMAG (x) - // FIXME: + { + impl::populate_imag_dispatch_vectors(); + using impl::imag_contig_dispatch_vector; + using impl::imag_output_typeid_vector; + using impl::imag_strided_dispatch_vector; + + auto imag_pyapi = [&](arrayT src, arrayT dst, sycl::queue exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, imag_output_typeid_vector, + imag_contig_dispatch_vector, imag_strided_dispatch_vector); + }; + m.def("_imag", imag_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto imag_result_type_pyapi = [&](py::dtype dtype) { + return py_unary_ufunc_result_type(dtype, imag_output_typeid_vector); + }; + m.def("_imag_result_type", imag_result_type_pyapi); + } // U17: ==== ISFINITE (x) { @@ -1550,8 +1710,49 @@ void init_elementwise_functions(py::module_ m) // B21: ==== POW (x1, x2) // FIXME: + // U??: ==== 
PROJ (x) + { + impl::populate_proj_dispatch_vectors(); + using impl::proj_contig_dispatch_vector; + using impl::proj_output_typeid_vector; + using impl::proj_strided_dispatch_vector; + + auto proj_pyapi = [&](arrayT src, arrayT dst, sycl::queue exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, proj_output_typeid_vector, + proj_contig_dispatch_vector, proj_strided_dispatch_vector); + }; + m.def("_proj", proj_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto proj_result_type_pyapi = [&](py::dtype dtype) { + return py_unary_ufunc_result_type(dtype, proj_output_typeid_vector); + }; + m.def("_proj_result_type", proj_result_type_pyapi); + } + // U27: ==== REAL (x) - // FIXME: + { + impl::populate_real_dispatch_vectors(); + using impl::real_contig_dispatch_vector; + using impl::real_output_typeid_vector; + using impl::real_strided_dispatch_vector; + + auto real_pyapi = [&](arrayT src, arrayT dst, sycl::queue exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, real_output_typeid_vector, + real_contig_dispatch_vector, real_strided_dispatch_vector); + }; + m.def("_real", real_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto real_result_type_pyapi = [&](py::dtype dtype) { + return py_unary_ufunc_result_type(dtype, real_output_typeid_vector); + }; + m.def("_real_result_type", real_result_type_pyapi); + } // B22: ==== REMAINDER (x1, x2) // FIXME: diff --git a/dpctl/tests/elementwise/test_complex.py b/dpctl/tests/elementwise/test_complex.py new file mode 100644 index 0000000000..e1d4ebb66b --- /dev/null +++ b/dpctl/tests/elementwise/test_complex.py @@ -0,0 +1,198 @@ +# Data Parallel Control (dpctl) +# +# Copyright 2020-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
# the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools

import numpy as np
import pytest
from numpy.testing import assert_allclose

import dpctl.tensor as dpt
from dpctl.tests.helper import get_queue_or_skip, skip_if_dtype_not_supported

from .utils import _all_dtypes, _map_to_device_dtype, _usm_types

# Pairs of (numpy reference, dpctl.tensor implementation) exercised below.
_NP_DPT_PAIRS = [(np.real, dpt.real), (np.imag, dpt.imag), (np.conj, dpt.conj)]


@pytest.mark.parametrize("dtype", _all_dtypes)
def test_complex_out_type(dtype):
    """real/imag/conj must produce the dtype numpy produces, mapped to the
    capabilities of the target device."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    X = dpt.asarray(0, dtype=dtype, sycl_queue=q)
    for np_call, dpt_call in _NP_DPT_PAIRS:
        expected_dtype = np_call(np.array(0, dtype=dtype)).dtype
        expected_dtype = _map_to_device_dtype(expected_dtype, q.sycl_device)
        assert dpt_call(X).dtype == expected_dtype


@pytest.mark.parametrize(
    "np_call, dpt_call",
    [(np.real, dpt.real), (np.imag, dpt.imag), (np.conj, dpt.conj)],
)
@pytest.mark.parametrize("dtype", _all_dtypes)
def test_complex_output(np_call, dpt_call, dtype):
    """Element-wise values agree with numpy, both allocating and with `out=`."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    n_seq = 100
    re_part = np.linspace(0, 10, num=n_seq, dtype=dtype)
    im_part = np.linspace(0, 20, num=n_seq, dtype=dtype)
    Xnp = re_part + 1j * im_part
    X = dpt.asarray(Xnp, dtype=Xnp.dtype, sycl_queue=q)

    Y = dpt_call(X)
    tol = 8 * dpt.finfo(Y.dtype).resolution
    assert_allclose(dpt.asnumpy(Y), np_call(Xnp), atol=tol, rtol=tol)

    # Same computation routed through a preallocated output array.
    Z = dpt.empty_like(X, dtype=np_call(Xnp).dtype)
    dpt_call(X, out=Z)
    assert_allclose(dpt.asnumpy(Z), np_call(Xnp), atol=tol, rtol=tol)


@pytest.mark.parametrize(
    "np_call, dpt_call",
    [(np.real, dpt.real), (np.imag, dpt.imag), (np.conj, dpt.conj)],
)
@pytest.mark.parametrize("usm_type", _usm_types)
def test_complex_usm_type(np_call, dpt_call, usm_type):
    """USM allocation type and queue propagate from input to output."""
    q = get_queue_or_skip()

    arg_dt = np.dtype("c8")
    input_shape = (10, 10, 10, 10)
    X = dpt.empty(input_shape, dtype=arg_dt, usm_type=usm_type, sycl_queue=q)
    X[..., 0::2] = np.pi / 6 + 1j * np.pi / 3
    X[..., 1::2] = np.pi / 3 + 1j * np.pi / 6

    Y = dpt_call(X)
    assert Y.usm_type == X.usm_type
    assert Y.sycl_queue == X.sycl_queue
    assert Y.flags.c_contiguous

    expected_Y = np.empty(input_shape, dtype=arg_dt)
    expected_Y[..., 0::2] = np_call(np.complex64(np.pi / 6 + 1j * np.pi / 3))
    expected_Y[..., 1::2] = np_call(np.complex64(np.pi / 3 + 1j * np.pi / 6))
    tol = 8 * dpt.finfo(Y.dtype).resolution
    assert_allclose(dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol)


@pytest.mark.parametrize(
    "np_call, dpt_call",
    [(np.real, dpt.real), (np.imag, dpt.imag), (np.conj, dpt.conj)],
)
@pytest.mark.parametrize("dtype", _all_dtypes)
def test_complex_order(np_call, dpt_call, dtype):
    """Results agree with numpy for every memory order and axis permutation."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    arg_dt = np.dtype(dtype)
    input_shape = (10, 10, 10, 10)
    X = dpt.empty(input_shape, dtype=arg_dt, sycl_queue=q)
    X[..., 0::2] = np.pi / 6 + 1j * np.pi / 3
    X[..., 1::2] = np.pi / 3 + 1j * np.pi / 6

    # `order_` avoids shadowing the builtin `ord`.
    for order_, perms in itertools.product(
        ["C", "F", "A", "K"], itertools.permutations(range(4))
    ):
        U = dpt.permute_dims(X[:, ::-1, ::-1, :], perms)
        Y = dpt_call(U, order=order_)
        expected_Y = np_call(dpt.asnumpy(U))
        assert np.allclose(dpt.asnumpy(Y), expected_Y)


@pytest.mark.parametrize("dtype", ["c8", "c16"])
def test_projection_complex(dtype):
    """proj maps every complex infinity onto (+inf, copysign(0, imag))."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    X = [
        complex(1, 2),
        complex(dpt.inf, -1),
        complex(0, -dpt.inf),
        complex(-dpt.inf, dpt.nan),
    ]
    Y = [
        complex(1, 2),
        complex(np.inf, -0.0),
        complex(np.inf, -0.0),
        complex(np.inf, 0.0),
    ]

    Xf = dpt.asarray(X, dtype=dtype, sycl_queue=q)
    Yf = np.array(Y, dtype=dtype)

    tol = 8 * dpt.finfo(Xf.dtype).resolution
    assert_allclose(dpt.asnumpy(dpt.proj(Xf)), Yf, atol=tol, rtol=tol)


@pytest.mark.parametrize("dtype", _all_dtypes)
def test_projection(dtype):
    """A finite scalar projects onto itself (with zero imaginary part)."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    Xf = dpt.asarray(1, dtype=dtype, sycl_queue=q)
    out_dtype = dpt.proj(Xf).dtype
    Yf = np.array(complex(1, 0), dtype=out_dtype)

    tol = 8 * dpt.finfo(Yf.dtype).resolution
    assert_allclose(dpt.asnumpy(dpt.proj(Xf)), Yf, atol=tol, rtol=tol)


@pytest.mark.parametrize(
    "np_call, dpt_call",
    [(np.real, dpt.real), (np.imag, dpt.imag), (np.conj, dpt.conj)],
)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
@pytest.mark.parametrize("stride", [-1, 1, 2, 4, 5])
def test_complex_strided(np_call, dpt_call, dtype, stride):
    """Non-unit and negative strides produce the same values as numpy."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    N = 100
    rng = np.random.default_rng(42)
    re_part = rng.standard_normal(N, dtype)
    im_part = 1j * rng.standard_normal(N, dtype)
    x = re_part + im_part
    y = np_call(x[::stride])
    z = dpt_call(dpt.asarray(x[::stride]))

    tol = 8 * dpt.finfo(y.dtype).resolution
    assert_allclose(y, dpt.asnumpy(z), atol=tol, rtol=tol)


@pytest.mark.parametrize("dtype", ["f2", "f4", "f8"])
def test_complex_special_cases(dtype):
    """real/imag/conj agree with numpy on purely imaginary nan/inf inputs."""
    q = get_queue_or_skip()
    skip_if_dtype_not_supported(dtype, q)

    x = [np.nan, -np.nan, np.inf, -np.inf]
    with np.errstate(all="ignore"):
        Xnp = 1j * np.array(x, dtype=dtype)
    X = dpt.asarray(Xnp, dtype=Xnp.dtype)

    tol = 8 * dpt.finfo(dtype).resolution
    assert_allclose(dpt.asnumpy(dpt.real(X)), np.real(Xnp), atol=tol, rtol=tol)
    assert_allclose(dpt.asnumpy(dpt.imag(X)), np.imag(Xnp), atol=tol, rtol=tol)
    assert_allclose(dpt.asnumpy(dpt.conj(X)), np.conj(Xnp), atol=tol, rtol=tol)