From f2ea3783ba9bf0ff84cfc6bf959dc6b8a30f7e06 Mon Sep 17 00:00:00 2001
From: "Lee, Sang Ik"
Date: Tue, 12 Sep 2023 23:51:03 +0000
Subject: [PATCH] [MLIR][Conversion] Add new option "use-opencl" for
 "convert-gpu-to-spirv" conversion pass

The convert-gpu-to-spirv pass currently hardcodes the memory space to
storage class map as spirv::mapMemorySpaceToVulkanStorageClass.

Add a new option, use-opencl, to control this choice. The option defaults
to false, which keeps spirv::mapMemorySpaceToVulkanStorageClass as the
memory space to storage class map. When use-opencl is set to true,
spirv::mapMemorySpaceToOpenCLStorageClass is used instead.
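For example, the pass can be invoked with the new option as in the RUN
line of the new test below (input.mlir is a placeholder; use-64bit-index
is independent of this change and only enables 64-bit index lowering):

  mlir-opt -convert-gpu-to-spirv='use-64bit-index=true use-opencl=true' \
      input.mlir

The new test runs this pipeline against an OpenCL target environment and
checks that kernel buffer arguments are lowered to pointers in the
CrossWorkgroup storage class.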
---
 mlir/include/mlir/Conversion/Passes.td        |  5 +-
 .../Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp  |  3 +-
 .../GPUToSPIRV/load-store-opencl.mlir         | 81 +++++++++++++++++++
 3 files changed, 87 insertions(+), 2 deletions(-)
 create mode 100644 mlir/test/Conversion/GPUToSPIRV/load-store-opencl.mlir

diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 3218760931b8c..03cf905aaee21 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -567,7 +567,10 @@ def ConvertGPUToSPIRV : Pass<"convert-gpu-to-spirv", "ModuleOp"> {
   let options = [
     Option<"use64bitIndex", "use-64bit-index",
            "bool", /*default=*/"false",
-           "Use 64-bit integers to convert index types">
+           "Use 64-bit integers to convert index types">,
+    Option<"useOpenCL", "use-opencl",
+           "bool", /*default=*/"false",
+           "Use OpenCL instead of Vulkan">
   ];
 }
 
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
index d0ce58597f980..cce8bc4f5f974 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
@@ -69,7 +69,8 @@ void GPUToSPIRVPass::runOnOperation() {
       std::unique_ptr<ConversionTarget> target =
           spirv::getMemorySpaceToStorageClassTarget(*context);
       spirv::MemorySpaceToStorageClassMap memorySpaceMap =
-          spirv::mapMemorySpaceToVulkanStorageClass;
+          this->useOpenCL ? spirv::mapMemorySpaceToOpenCLStorageClass
+                          : spirv::mapMemorySpaceToVulkanStorageClass;
       spirv::MemorySpaceToStorageClassConverter converter(memorySpaceMap);
 
       RewritePatternSet patterns(context);
diff --git a/mlir/test/Conversion/GPUToSPIRV/load-store-opencl.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store-opencl.mlir
new file mode 100644
index 0000000000000..8cbb47d397e89
--- /dev/null
+++ b/mlir/test/Conversion/GPUToSPIRV/load-store-opencl.mlir
@@ -0,0 +1,81 @@
+// RUN: mlir-opt -convert-gpu-to-spirv='use-64bit-index=true use-opencl=true' %s -o - | FileCheck %s
+
+module attributes {
+  gpu.container_module,
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Kernel, Addresses, Int64], []>, api=OpenCL, #spirv.resource_limits<>>
+} {
+  func.func @load_store(%arg0: memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg1: memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg2: memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>) {
+    %c0 = arith.constant 0 : index
+    %c12 = arith.constant 12 : index
+    %0 = arith.subi %c12, %c0 : index
+    %c1 = arith.constant 1 : index
+    %c0_0 = arith.constant 0 : index
+    %c4 = arith.constant 4 : index
+    %1 = arith.subi %c4, %c0_0 : index
+    %c1_1 = arith.constant 1 : index
+    %c1_2 = arith.constant 1 : index
+    gpu.launch_func @kernels::@load_store_kernel
+        blocks in (%0, %c1_2, %c1_2) threads in (%1, %c1_2, %c1_2)
+        args(%arg0 : memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg1 : memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg2 : memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>,
+             %c0 : index, %c0_0 : index, %c1 : index, %c1_1 : index)
+    return
+  }
+
+  // CHECK-LABEL: spirv.module @{{.*}} Physical64 OpenCL
+  gpu.module @kernels {
+    // CHECK-DAG: spirv.GlobalVariable @[[WORKGROUPSIZEVAR:.*]] built_in("WorkgroupSize") : !spirv.ptr<vector<3xi64>, Input>
+    // CHECK-DAG: spirv.GlobalVariable @[[NUMWORKGROUPSVAR:.*]] built_in("NumWorkgroups") : !spirv.ptr<vector<3xi64>, Input>
+    // CHECK-DAG: spirv.GlobalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spirv.ptr<vector<3xi64>, Input>
+    // CHECK-DAG: spirv.GlobalVariable @[[$WORKGROUPIDVAR:.*]] built_in("WorkgroupId") : !spirv.ptr<vector<3xi64>, Input>
+    // CHECK-LABEL: spirv.func @load_store_kernel
+    // CHECK-SAME: %[[ARG0:[^\s]+]]: !spirv.ptr<!spirv.array<48 x f32>, CrossWorkgroup>
+    // CHECK-SAME: %[[ARG1:[^\s]+]]: !spirv.ptr<!spirv.array<48 x f32>, CrossWorkgroup>
+    // CHECK-SAME: %[[ARG2:[^\s]+]]: !spirv.ptr<!spirv.array<48 x f32>, CrossWorkgroup>
+    gpu.func @load_store_kernel(%arg0: memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg1: memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg2: memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>, %arg3: index, %arg4: index, %arg5: index, %arg6: index) kernel
+      attributes {gpu.known_block_size = array<i32: 4, 1, 1>, gpu.known_grid_size = array<i32: 12, 1, 1>, spirv.entry_point_abi = #spirv.entry_point_abi<>} {
+      // CHECK: %[[ADDRESSWORKGROUPID:.*]] = spirv.mlir.addressof @[[$WORKGROUPIDVAR]]
+      // CHECK: %[[WORKGROUPID:.*]] = spirv.Load "Input" %[[ADDRESSWORKGROUPID]]
+      // CHECK: %[[WORKGROUPIDX:.*]] = spirv.CompositeExtract %[[WORKGROUPID]]{{\[}}0 : i32{{\]}}
+      // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spirv.mlir.addressof @[[$LOCALINVOCATIONIDVAR]]
+      // CHECK: %[[LOCALINVOCATIONID:.*]] = spirv.Load "Input" %[[ADDRESSLOCALINVOCATIONID]]
+      // CHECK: %[[LOCALINVOCATIONIDX:.*]] = spirv.CompositeExtract %[[LOCALINVOCATIONID]]{{\[}}0 : i32{{\]}}
+      %0 = gpu.block_id x
+      %1 = gpu.block_id y
+      %2 = gpu.block_id z
+      %3 = gpu.thread_id x
+      %4 = gpu.thread_id y
+      %5 = gpu.thread_id z
+      %6 = gpu.grid_dim x
+      %7 = gpu.grid_dim y
+      %8 = gpu.grid_dim z
+      %9 = gpu.block_dim x
+      %10 = gpu.block_dim y
+      %11 = gpu.block_dim z
+      // CHECK: %[[INDEX1:.*]] = spirv.IAdd
+      %12 = arith.addi %arg3, %0 : index
+      // CHECK: %[[INDEX2:.*]] = spirv.IAdd
+      %13 = arith.addi %arg4, %3 : index
+      // CHECK: %[[OFFSET1_0:.*]] = spirv.Constant 0 : i64
+      // CHECK: %[[STRIDE1_1:.*]] = spirv.Constant 4 : i64
+      // CHECK: %[[UPDATE1_1:.*]] = spirv.IMul %[[STRIDE1_1]], %[[INDEX1]] : i64
+      // CHECK: %[[OFFSET1_1:.*]] = spirv.IAdd %[[OFFSET1_0]], %[[UPDATE1_1]] : i64
+      // CHECK: %[[STRIDE1_2:.*]] = spirv.Constant 1 : i64
+      // CHECK: %[[UPDATE1_2:.*]] = spirv.IMul %[[STRIDE1_2]], %[[INDEX2]] : i64
+      // CHECK: %[[OFFSET1_2:.*]] = spirv.IAdd %[[OFFSET1_1]], %[[UPDATE1_2]] : i64
+      // CHECK: %[[PTR1:.*]] = spirv.AccessChain %[[ARG0]]{{\[}}%[[OFFSET1_2]]{{\]}} : !spirv.ptr<!spirv.array<48 x f32>, CrossWorkgroup>, i64
+      // CHECK-NEXT: %[[VAL1:.*]] = spirv.Load "CrossWorkgroup" %[[PTR1]] : f32
+      %14 = memref.load %arg0[%12, %13] : memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>
+      // CHECK: %[[PTR2:.*]] = spirv.AccessChain %[[ARG1]]{{\[}}{{%.*}}{{\]}}
+      // CHECK-NEXT: %[[VAL2:.*]] = spirv.Load "CrossWorkgroup" %[[PTR2]]
+      %15 = memref.load %arg1[%12, %13] : memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>
+      // CHECK: %[[VAL3:.*]] = spirv.FAdd %[[VAL1]], %[[VAL2]] : f32
+      %16 = arith.addf %14, %15 : f32
+      // CHECK: %[[PTR3:.*]] = spirv.AccessChain %[[ARG2]]{{\[}}{{%.*}}{{\]}}
+      // CHECK: spirv.Store "CrossWorkgroup" %[[PTR3]], %[[VAL3]] : f32
+      memref.store %16, %arg2[%12, %13] : memref<12x4xf32, #spirv.storage_class<CrossWorkgroup>>
+      // CHECK: spirv.Return
+      gpu.return
+    }
+  }
+}