diff --git a/sycl/doc/GetStartedGuide.md b/sycl/doc/GetStartedGuide.md
index 70d91b96c5cbd..34eff1725cfa0 100644
--- a/sycl/doc/GetStartedGuide.md
+++ b/sycl/doc/GetStartedGuide.md
@@ -424,6 +424,11 @@ skipped.
 If CUDA support has been built, it is tested only if there are CUDA
 devices available.
 
+If testing with ROCm for AMD, make sure to specify the GPU being used
+by adding `-Xsycl-target-backend=amdgcn-amd-amdhsa-sycldevice
+--offload-arch=` to the CMake variable
+`SYCL_CLANG_EXTRA_FLAGS`.
+
 #### Run DPC++ E2E test suite
 
 Follow instructions from the link below to build and run tests:
diff --git a/sycl/test/CMakeLists.txt b/sycl/test/CMakeLists.txt
index 76612d17e1ff2..59f467bcb08d4 100644
--- a/sycl/test/CMakeLists.txt
+++ b/sycl/test/CMakeLists.txt
@@ -75,6 +75,34 @@ if(SYCL_BUILD_PI_CUDA)
   add_custom_target(check-sycl-cuda)
   add_dependencies(check-sycl-cuda check-sycl-ptx)
   add_dependencies(check-sycl check-sycl-cuda)
+endif()
+
+if(SYCL_BUILD_PI_ROCM)
+  add_custom_target(check-sycl-rocm)
+  if("${SYCL_BUILD_PI_ROCM_PLATFORM}" STREQUAL "NVIDIA")
+    add_lit_testsuite(check-sycl-rocm-ptx "Running device-agnostic SYCL regression tests for ROCm NVidia PTX"
+      ${CMAKE_CURRENT_BINARY_DIR}
+      ARGS ${RT_TEST_ARGS}
+      PARAMS "SYCL_TRIPLE=nvptx64-nvidia-cuda-sycldevice;SYCL_PLUGIN=rocm"
+      DEPENDS ${SYCL_TEST_DEPS}
+      EXCLUDE_FROM_CHECK_ALL
+    )
+
+    add_dependencies(check-sycl-rocm check-sycl-rocm-ptx)
+  elseif("${SYCL_BUILD_PI_ROCM_PLATFORM}" STREQUAL "AMD")
+    add_lit_testsuite(check-sycl-rocm-gcn "Running device-agnostic SYCL regression tests for ROCm AMDGCN"
+      ${CMAKE_CURRENT_BINARY_DIR}
+      ARGS ${RT_TEST_ARGS}
+      PARAMS "SYCL_TRIPLE=amdgcn-amd-amdhsa-sycldevice;SYCL_PLUGIN=rocm"
+      DEPENDS ${SYCL_TEST_DEPS}
+      EXCLUDE_FROM_CHECK_ALL
+    )
+
+    add_dependencies(check-sycl-rocm check-sycl-rocm-gcn)
+  else()
+    message(FATAL_ERROR "SYCL_BUILD_PI_ROCM_PLATFORM must be set to either 'AMD' or 'NVIDIA' (set to: '${SYCL_BUILD_PI_ROCM_PLATFORM}')")
+  endif()
+  add_dependencies(check-sycl check-sycl-rocm)
 endif()
 
 add_subdirectory(on-device)
diff --git a/sycl/test/basic_tests/built-ins.cpp b/sycl/test/basic_tests/built-ins.cpp
index 07d8179925d0b..8f221dfc5419a 100644
--- a/sycl/test/basic_tests/built-ins.cpp
+++ b/sycl/test/basic_tests/built-ins.cpp
@@ -3,6 +3,10 @@
 // CUDA does not support printf.
 // UNSUPPORTED: cuda
+//
+// Hits an assertion with AMD:
+// XFAIL: rocm_amd
+
 #include
 #include
diff --git a/sycl/test/esimd/odr.cpp b/sycl/test/esimd/odr.cpp
index 9c593e27b21f6..168a52b31e19c 100644
--- a/sycl/test/esimd/odr.cpp
+++ b/sycl/test/esimd/odr.cpp
@@ -11,6 +11,9 @@
 // Cuda does not support intrinsics generated by the ESIMD compilation path:
 // UNSUPPORTED: cuda
 //
+// Linking issues with AMD:
+// XFAIL: rocm_amd
+
 #include
 #include
 #include
diff --git a/sycl/test/extensions/group-algorithm.cpp b/sycl/test/extensions/group-algorithm.cpp
index 51bbc15074119..7698da76f565c 100644
--- a/sycl/test/extensions/group-algorithm.cpp
+++ b/sycl/test/extensions/group-algorithm.cpp
@@ -1,6 +1,10 @@
 // RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -I . -o %t.out
 // Group operations are not supported on host device. The test checks that
 // compilation succeeded.
+//
+// Missing __spirv_GroupIAdd, __spirv_GroupAll, __spirv_GroupBroadcast,
+// __spirv_GroupAny, __spirv_GroupSMin on AMD:
+// XFAIL: rocm_amd
 
 // TODO: enable compile+runtime checks for operations defined in SPIR-V 1.3.
 // That requires either adding a switch to clang (-spirv-max-version=1.3) or
diff --git a/sycl/test/lit.cfg.py b/sycl/test/lit.cfg.py
index e2134956e9ceb..05a0d37f8e36f 100644
--- a/sycl/test/lit.cfg.py
+++ b/sycl/test/lit.cfg.py
@@ -103,6 +103,15 @@
 if triple == 'nvptx64-nvidia-cuda-sycldevice':
     config.available_features.add('cuda')
 
+if triple == 'amdgcn-amd-amdhsa-sycldevice':
+    config.available_features.add('rocm_amd')
+    # For AMD the specific GPU has to be specified with --offload-arch
+    if not re.match('.*--offload-arch.*', config.sycl_clang_extra_flags):
+        raise Exception("Error: missing --offload-arch flag when trying to " \
+                        "run lit tests for AMD GPU, please add " \
+                        "`-Xsycl-target-backend=amdgcn-amd-amdhsa-sycldevice --offload-arch=` to " \
+                        "the CMake variable SYCL_CLANG_EXTRA_FLAGS")
+
 # Set timeout for test = 10 mins
 try:
     import psutil
diff --git a/sycl/test/on-device/CMakeLists.txt b/sycl/test/on-device/CMakeLists.txt
index 00ed76ad66fee..96b04ec252b11 100644
--- a/sycl/test/on-device/CMakeLists.txt
+++ b/sycl/test/on-device/CMakeLists.txt
@@ -40,3 +40,17 @@ if(SYCL_BUILD_PI_CUDA)
     add_dependencies(check-sycl-cuda check-sycl-cuda-on-device)
   endif()
 endif()
+
+if(SYCL_BUILD_PI_ROCM)
+  add_lit_testsuite(check-sycl-rocm-on-device "Running the SYCL regression tests for ROCm"
+    ${CMAKE_CURRENT_BINARY_DIR}
+    ARGS ${RT_TEST_ARGS}
+    PARAMS "SYCL_PLUGIN=rocm"
+    DEPENDS ${SYCL_TEST_DEPS}
+    EXCLUDE_FROM_CHECK_ALL
+  )
+  set_target_properties(check-sycl-rocm-on-device PROPERTIES FOLDER "SYCL ROCm device tests")
+  if(TARGET check-sycl-rocm)
+    add_dependencies(check-sycl-rocm check-sycl-rocm-on-device)
+  endif()
+endif()
diff --git a/sycl/test/on-device/back_to_back_collectives.cpp b/sycl/test/on-device/back_to_back_collectives.cpp
index 36a39730933f1..4e576b8d6267e 100644
--- a/sycl/test/on-device/back_to_back_collectives.cpp
+++ b/sycl/test/on-device/back_to_back_collectives.cpp
@@ -3,6 +3,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupIAdd on AMD:
+// XFAIL: rocm_amd
 
 #include
 #include
diff --git a/sycl/test/on-device/basic_tests/aspects.cpp b/sycl/test/on-device/basic_tests/aspects.cpp
index 37961815f392d..bc5b04f4ff0c9 100644
--- a/sycl/test/on-device/basic_tests/aspects.cpp
+++ b/sycl/test/on-device/basic_tests/aspects.cpp
@@ -1,5 +1,9 @@
 // RUN: %clangxx -fsycl %s -o %t.out
 // RUN: env SYCL_DEVICE_FILTER=%sycl_be %t.out
+//
+// HIP is missing some of the parameters tested here, so it fails with ROCm
+// for NVIDIA:
+// XFAIL: rocm_nvidia
 
 //==--------------- aspects.cpp - SYCL device test ------------------------==//
 //
diff --git a/sycl/test/on-device/basic_tests/specialization_constants/host_apis.cpp b/sycl/test/on-device/basic_tests/specialization_constants/host_apis.cpp
index 36c9f12a22008..c373291938a2e 100644
--- a/sycl/test/on-device/basic_tests/specialization_constants/host_apis.cpp
+++ b/sycl/test/on-device/basic_tests/specialization_constants/host_apis.cpp
@@ -2,6 +2,8 @@
 // RUN: %t.out
 
 // UNSUPPORTED: cuda
+// UNSUPPORTED: rocm_nvidia
+// UNSUPPORTED: rocm_amd
 
 #include
diff --git a/sycl/test/on-device/basic_tests/specialization_constants/kernel_lambda_with_kernel_handler_arg.cpp b/sycl/test/on-device/basic_tests/specialization_constants/kernel_lambda_with_kernel_handler_arg.cpp
index d249d4396a00e..dd7a27b8db357 100644
--- a/sycl/test/on-device/basic_tests/specialization_constants/kernel_lambda_with_kernel_handler_arg.cpp
+++ b/sycl/test/on-device/basic_tests/specialization_constants/kernel_lambda_with_kernel_handler_arg.cpp
@@ -1,5 +1,8 @@
 // RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
+//
+// Hits an assert in the Lower Work Group Scope Code pass on AMD:
+// XFAIL: rocm_amd
 
 // This test checks all possible scenarios of running single_task, parallel_for
 // and parallel_for_work_group to verify that this code compiles and runs
diff --git a/sycl/test/on-device/basic_tests/specialization_constants/non_native/aot_w_kernel_handler_wo_spec_consts.cpp b/sycl/test/on-device/basic_tests/specialization_constants/non_native/aot_w_kernel_handler_wo_spec_consts.cpp
index 4a52afbcbca16..f0a71e511076c 100644
--- a/sycl/test/on-device/basic_tests/specialization_constants/non_native/aot_w_kernel_handler_wo_spec_consts.cpp
+++ b/sycl/test/on-device/basic_tests/specialization_constants/non_native/aot_w_kernel_handler_wo_spec_consts.cpp
@@ -1,5 +1,8 @@
 // RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
+//
+// Hits an assert in the Lower Work Group Scope Code pass on AMD:
+// XFAIL: rocm_amd
 
 // This test checks correctness of compiling and running of application with
 // kernel lambdas containing kernel_handler arguments and w/o usage of
diff --git a/sycl/test/on-device/basic_tests/specialization_constants/non_native/gpu.cpp b/sycl/test/on-device/basic_tests/specialization_constants/non_native/gpu.cpp
index 163b4b82b0f32..879fc95e943d4 100644
--- a/sycl/test/on-device/basic_tests/specialization_constants/non_native/gpu.cpp
+++ b/sycl/test/on-device/basic_tests/specialization_constants/non_native/gpu.cpp
@@ -1,6 +1,10 @@
 // REQUIRES: ocloc, gpu, TEMPORARY_DISABLED
 // UNSUPPORTED: cuda
 // CUDA is not compatible with SPIR.
+//
+// UNSUPPORTED: rocm_nvidia
+// UNSUPPORTED: rocm_amd
+// ROCm is not compatible with SPIR.
 
 // RUN: %clangxx -fsycl -fsycl-targets=spir64_gen-unknown-unknown-sycldevice -Xsycl-target-backend=spir64_gen-unknown-unknown-sycldevice "-device *" %S/Inputs/common.cpp -o %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
diff --git a/sycl/test/on-device/extensions/intel-ext-device.cpp b/sycl/test/on-device/extensions/intel-ext-device.cpp
index 55e08749fd774..e64cb21eeeca2 100644
--- a/sycl/test/on-device/extensions/intel-ext-device.cpp
+++ b/sycl/test/on-device/extensions/intel-ext-device.cpp
@@ -4,6 +4,8 @@
 //
 // REQUIRES: gpu
 // UNSUPPORTED: cuda
+// UNSUPPORTED: rocm_nvidia
+// UNSUPPORTED: rocm_amd
 
 //==--------- intel-ext-device.cpp - SYCL device test ------------==//
 //
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/all_of.cpp b/sycl/test/on-device/group_algorithms_sycl2020/all_of.cpp
index 9d76fa2aaa48d..80723b5f3f90f 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/all_of.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/all_of.cpp
@@ -2,6 +2,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupAll on AMD:
+// XFAIL: rocm_amd
 
 #include "support.h"
 #include
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/any_of.cpp b/sycl/test/on-device/group_algorithms_sycl2020/any_of.cpp
index 5dc5922f4391f..9f681ec20c4bb 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/any_of.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/any_of.cpp
@@ -2,6 +2,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupAny on AMD:
+// XFAIL: rocm_amd
 
 #include "support.h"
 #include
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/exclusive_scan.cpp b/sycl/test/on-device/group_algorithms_sycl2020/exclusive_scan.cpp
index 1987e52656a59..29d74edf83796 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/exclusive_scan.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/exclusive_scan.cpp
@@ -2,6 +2,10 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupIAdd, __spirv_GroupBroadcast, __spirv_GroupSMin and
+// __spirv_GroupSMax on AMD:
+// XFAIL: rocm_amd
 
 // TODO: enable compile+runtime checks for operations defined in SPIR-V 1.3.
 // That requires either adding a switch to clang (-spirv-max-version=1.3) or
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/group_broadcast.cpp b/sycl/test/on-device/group_algorithms_sycl2020/group_broadcast.cpp
index 2fadb3445626f..dafec4e8ebf3c 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/group_broadcast.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/group_broadcast.cpp
@@ -2,6 +2,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupBroadcast on AMD:
+// XFAIL: rocm_amd
 
 #include "support.h"
 #include
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/inclusive_scan.cpp b/sycl/test/on-device/group_algorithms_sycl2020/inclusive_scan.cpp
index f855c0717efcb..7289a5af50f62 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/inclusive_scan.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/inclusive_scan.cpp
@@ -2,6 +2,10 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupIAdd, __spirv_GroupBroadcast, __spirv_GroupSMin and
+// __spirv_GroupSMax on AMD:
+// XFAIL: rocm_amd
 
 // TODO: enable compile+runtime checks for operations defined in SPIR-V 1.3.
 // That requires either adding a switch to clang (-spirv-max-version=1.3) or
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/none_of.cpp b/sycl/test/on-device/group_algorithms_sycl2020/none_of.cpp
index 010641bc83560..b2b0eb0d7d825 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/none_of.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/none_of.cpp
@@ -2,6 +2,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupAll and __spirv_GroupAny on AMD:
+// XFAIL: rocm_amd
 
 #include "support.h"
 #include
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/permute_select.cpp b/sycl/test/on-device/group_algorithms_sycl2020/permute_select.cpp
index b0fbb35982573..3eb95eebab72f 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/permute_select.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/permute_select.cpp
@@ -3,6 +3,9 @@
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 //
+// Missing __spirv_SubgroupId, __spirv_SubgroupMaxSize, __spirv_SubgroupShuffle* on AMD:
+// XFAIL: rocm_amd
+//
 //==------------ permute_select.cpp -*- C++ -*-----------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/reduce.cpp b/sycl/test/on-device/group_algorithms_sycl2020/reduce.cpp
index 4ad407cf5ccb8..d13e626f32504 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/reduce.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/reduce.cpp
@@ -2,6 +2,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Missing __spirv_GroupIAdd, __spirv_GroupSMin and __spirv_GroupSMax on AMD:
+// XFAIL: rocm_amd
 
 // TODO: enable compile+runtime checks for operations defined in SPIR-V 1.3.
 // That requires either adding a switch to clang (-spirv-max-version=1.3) or
diff --git a/sycl/test/on-device/group_algorithms_sycl2020/shift_left_right.cpp b/sycl/test/on-device/group_algorithms_sycl2020/shift_left_right.cpp
index e3c97cac0e81d..3e26ef9bba99c 100644
--- a/sycl/test/on-device/group_algorithms_sycl2020/shift_left_right.cpp
+++ b/sycl/test/on-device/group_algorithms_sycl2020/shift_left_right.cpp
@@ -3,6 +3,9 @@
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 //
+// Missing __spirv_SubgroupId, __spirv_SubgroupMaxSize, __spirv_SubgroupShuffle* on AMD:
+// XFAIL: rocm_amd
+//
 //==------------ shift_left_right.cpp -*- C++ -*----------------------------==//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
diff --git a/sycl/test/on-device/lit.cfg.py b/sycl/test/on-device/lit.cfg.py
index ada28d7c51728..f5b8160d9fcf0 100644
--- a/sycl/test/on-device/lit.cfg.py
+++ b/sycl/test/on-device/lit.cfg.py
@@ -105,6 +105,8 @@ def getDeviceCount(device_type):
     is_cuda = False;
     is_level_zero = False;
+    is_rocm_amd = False;
+    is_rocm_nvidia = False;
     process = subprocess.Popen([get_device_count_by_type_path, device_type, backend],
                                stdout=subprocess.PIPE)
     (output, err) = process.communicate()
@@ -130,11 +132,15 @@ def getDeviceCount(device_type):
             is_cuda = True;
         if re.match(r".*level zero", result[1]):
             is_level_zero = True;
+        if re.match(r".*rocm-amd", result[1]):
+            is_rocm_amd = True;
+        if re.match(r".*rocm-nvidia", result[1]):
+            is_rocm_nvidia = True;
 
     if err:
         lit_config.warning("getDeviceCount {TYPE} {BACKEND} stderr:{ERR}".format(
             TYPE=device_type, BACKEND=backend, ERR=err))
-    return [value,is_cuda,is_level_zero]
+    return [value,is_cuda,is_level_zero,is_rocm_amd,is_rocm_nvidia]
 
 # check if compiler supports CL command line options
 cl_options=False
@@ -220,7 +226,9 @@ def getDeviceCount(device_type):
 
 cuda = False
 level_zero = False
-[gpu_count, cuda, level_zero] = getDeviceCount("gpu")
+rocm_amd = False
+rocm_nvidia = False
+[gpu_count, cuda, level_zero, rocm_amd, rocm_nvidia] = getDeviceCount("gpu")
 
 if gpu_count > 0:
     found_at_least_one_device = True
@@ -232,6 +240,16 @@ def getDeviceCount(device_type):
         config.available_features.add('cuda')
     elif level_zero:
         config.available_features.add('level_zero')
+    elif rocm_amd:
+        config.available_features.add('rocm_amd')
+        # For AMD the specific GPU has to be specified with --offload-arch
+        if not re.match('.*--offload-arch.*', config.sycl_clang_extra_flags):
+            raise Exception("Error: missing --offload-arch flag when trying to " \
+                            "run lit tests for AMD GPU, please add " \
+                            "`-Xsycl-target-backend=amdgcn-amd-amdhsa-sycldevice --offload-arch=` to " \
+                            "the CMake variable SYCL_CLANG_EXTRA_FLAGS")
+    elif rocm_nvidia:
+        config.available_features.add('rocm_nvidia')
 
     if platform.system() == "Linux":
         gpu_run_on_linux_substitute = "env SYCL_DEVICE_FILTER={SYCL_PLUGIN}:gpu,host ".format(SYCL_PLUGIN=backend)
@@ -261,8 +279,10 @@ def getDeviceCount(device_type):
 if not cuda and not level_zero and found_at_least_one_device:
     config.available_features.add('opencl')
 
-if cuda:
+if cuda or rocm_nvidia:
     config.substitutions.append( ('%sycl_triple', "nvptx64-nvidia-cuda-sycldevice" ) )
+elif rocm_amd:
+    config.substitutions.append( ('%sycl_triple', "amdgcn-amd-amdhsa-sycldevice" ) )
 else:
     config.substitutions.append( ('%sycl_triple', "spir64-unknown-unknown-sycldevice" ) )
 
diff --git a/sycl/test/on-device/span/span.cpp b/sycl/test/on-device/span/span.cpp
index 2a5cdd8c5640d..f7c4091244233 100644
--- a/sycl/test/on-device/span/span.cpp
+++ b/sycl/test/on-device/span/span.cpp
@@ -2,6 +2,9 @@
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
+//
+// Fails to release USM pointer on ROCm for NVIDIA
+// XFAIL: rocm_nvidia
 
 #include
 #include
diff --git a/sycl/test/on-device/srgb/srgba-read.cpp b/sycl/test/on-device/srgb/srgba-read.cpp
index 6e7ca3ea0eb58..dd377af36d6a8 100644
--- a/sycl/test/on-device/srgb/srgba-read.cpp
+++ b/sycl/test/on-device/srgb/srgba-read.cpp
@@ -4,6 +4,8 @@
 // XFAIL: level_zero
 // UNSUPPORTED: cuda
+// UNSUPPORTED: rocm_nvidia
+// UNSUPPORTED: rocm_amd
 
 #include
diff --git a/sycl/unittests/SYCL2020/KernelBundle.cpp b/sycl/unittests/SYCL2020/KernelBundle.cpp
index c8c2edb48f0ba..1003c8b2a44ee 100644
--- a/sycl/unittests/SYCL2020/KernelBundle.cpp
+++ b/sycl/unittests/SYCL2020/KernelBundle.cpp
@@ -70,6 +70,11 @@ TEST(KernelBundle, GetKernelBundleFromKernel) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cout << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
diff --git a/sycl/unittests/SYCL2020/SpecConstDefaultValues.cpp b/sycl/unittests/SYCL2020/SpecConstDefaultValues.cpp
index 441dc8e3ce470..a3e84a801ea7f 100644
--- a/sycl/unittests/SYCL2020/SpecConstDefaultValues.cpp
+++ b/sycl/unittests/SYCL2020/SpecConstDefaultValues.cpp
@@ -82,6 +82,11 @@ TEST(SpecConstDefaultValues, DefaultValuesAreSet) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
@@ -116,6 +121,11 @@ TEST(SpecConstDefaultValues, DefaultValuesAreOverriden) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
diff --git a/sycl/unittests/misc/KernelBuildOptions.cpp b/sycl/unittests/misc/KernelBuildOptions.cpp
index 91339553acd97..91d84e30c39de 100644
--- a/sycl/unittests/misc/KernelBuildOptions.cpp
+++ b/sycl/unittests/misc/KernelBuildOptions.cpp
@@ -223,6 +223,11 @@ TEST(KernelBuildOptions, KernelBundleBasic) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
@@ -258,6 +263,11 @@ TEST(KernelBuildOptions, Program) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
diff --git a/sycl/unittests/pi/BackendString.hpp b/sycl/unittests/pi/BackendString.hpp
index 7f051f5ab6790..d573d4f48a3d8 100644
--- a/sycl/unittests/pi/BackendString.hpp
+++ b/sycl/unittests/pi/BackendString.hpp
@@ -12,6 +12,7 @@ inline const char *GetBackendString(cl::sycl::backend backend) {
 #define PI_BACKEND_STR(backend_name) \
   case cl::sycl::backend::backend_name: \
     return #backend_name
+    PI_BACKEND_STR(rocm);
     PI_BACKEND_STR(cuda);
     PI_BACKEND_STR(host);
     PI_BACKEND_STR(opencl);
diff --git a/sycl/unittests/pi/EnqueueMemTest.cpp b/sycl/unittests/pi/EnqueueMemTest.cpp
index fe007bcce8945..15a286059b22d 100644
--- a/sycl/unittests/pi/EnqueueMemTest.cpp
+++ b/sycl/unittests/pi/EnqueueMemTest.cpp
@@ -74,6 +74,12 @@ class EnqueueMemTest : public testing::TestWithParam {
     detail::plugin plugin = GetParam();
 
+    if (plugin.getBackend() == sycl::backend::rocm && sizeof(T) > 4) {
+      std::cerr << "ROCm plugin doesn't support patterns larger than 4 bytes, "
+                   "skipping\n";
+      GTEST_SKIP();
+    }
+
     T inValues[_numElementsX] = {};
 
     for (size_t i = 0; i < _numElementsX; ++i) {
diff --git a/sycl/unittests/program_manager/itt_annotations.cpp b/sycl/unittests/program_manager/itt_annotations.cpp
index 4b86bc239f266..625d90bf024b9 100644
--- a/sycl/unittests/program_manager/itt_annotations.cpp
+++ b/sycl/unittests/program_manager/itt_annotations.cpp
@@ -257,6 +257,11 @@ TEST(ITTNotify, UseKernelBundle) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
@@ -293,6 +298,11 @@ TEST(ITTNotify, VarNotSet) {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
diff --git a/sycl/unittests/scheduler/RequiredWGSize.cpp b/sycl/unittests/scheduler/RequiredWGSize.cpp
index 23607bfe0064c..3d4fb1ddffb90 100644
--- a/sycl/unittests/scheduler/RequiredWGSize.cpp
+++ b/sycl/unittests/scheduler/RequiredWGSize.cpp
@@ -240,6 +240,11 @@ static void performChecks() {
     return;
   }
 
+  if (Plt.get_backend() == sycl::backend::rocm) {
+    std::cerr << "Test is not supported on ROCm platform, skipping\n";
+    return;
+  }
+
   sycl::unittest::PiMock Mock{Plt};
   setupDefaultMockAPIs(Mock);
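
For anyone trying the new targets locally: the GetStartedGuide note and the `--offload-arch` checks added to both lit configurations all hinge on the CMake variable `SYCL_CLANG_EXTRA_FLAGS`. The sketch below shows one way this could be wired up at configure time; it is not part of the patch, and the file name `rocm-amd.cmake` and the `gfx906` architecture are placeholder assumptions to be replaced with the GPU actually being targeted.

```cmake
# Hypothetical initial-cache file (rocm-amd.cmake), loaded with
# `cmake -C rocm-amd.cmake <usual configure arguments>`: enables the ROCm
# plugin for an AMD GPU and passes the --offload-arch flag that the lit
# configurations check for. gfx906 stands in for the actual GPU.
set(SYCL_BUILD_PI_ROCM ON CACHE BOOL "Build the ROCm plugin")
set(SYCL_BUILD_PI_ROCM_PLATFORM "AMD" CACHE STRING "ROCm platform: AMD or NVIDIA")
set(SYCL_CLANG_EXTRA_FLAGS
    "-Xsycl-target-backend=amdgcn-amd-amdhsa-sycldevice --offload-arch=gfx906"
    CACHE STRING "Extra clang flags used by the lit test suites")
```

With a configuration along these lines, building the `check-sycl-rocm` target added in `sycl/test/CMakeLists.txt` runs the `check-sycl-rocm-gcn` and `check-sycl-rocm-on-device` suites without tripping the missing `--offload-arch` error raised in `lit.cfg.py`.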