From c5af6f38007c78033fe910344d86bef15eeca36f Mon Sep 17 00:00:00 2001 From: Facebook Community Bot <6422482+facebook-github-bot@users.noreply.github.com> Date: Mon, 3 Jul 2023 10:21:49 -0700 Subject: [PATCH] Re-sync with internal repository --- backends/backends.bzl | 23 + backends/qnnpack/targets.bzl | 104 +++ backends/qnnpack/test/targets.bzl | 26 + backends/targets.bzl | 33 + backends/test/demos/rpc/targets.bzl | 48 + backends/xnnpack/targets.bzl | 65 ++ build/runtime_wrapper.bzl | 634 ++++++++++++++ codegen/codegen.bzl | 443 ++++++++++ codegen/targets.bzl | 22 + codegen/tools/targets.bzl | 70 ++ compiler/targets.bzl | 16 + configurations/targets.bzl | 28 + core/kernel_types/lean/targets.bzl | 59 ++ core/kernel_types/lean/test/targets.bzl | 39 + core/kernel_types/targets.bzl | 35 + core/kernel_types/test/targets.bzl | 16 + core/kernel_types/testing/targets.bzl | 75 ++ core/kernel_types/util/targets.bzl | 70 ++ core/kernel_types/util/test/targets.bzl | 46 + core/prim_ops/targets.bzl | 44 + core/prim_ops/test/targets.bzl | 20 + core/targets.bzl | 130 +++ core/test/targets.bzl | 133 +++ core/values/targets.bzl | 36 + core/values/test/targets.bzl | 28 + executor/targets.bzl | 107 +++ executor/test/targets.bzl | 170 ++++ exir/targets.bzl | 32 + kernels/aten/targets.bzl | 52 ++ kernels/aten/test/targets.bzl | 9 + kernels/optimized/cpu/targets.bzl | 103 +++ kernels/optimized/lib_defs.bzl | 135 +++ kernels/optimized/op_registration_util.bzl | 115 +++ kernels/optimized/targets.bzl | 54 ++ kernels/optimized/test/targets.bzl | 43 + kernels/portable/cpu/pattern/targets.bzl | 25 + kernels/portable/cpu/targets.bzl | 828 ++++++++++++++++++ kernels/portable/cpu/test/targets.bzl | 20 + kernels/portable/cpu/util/targets.bzl | 89 ++ kernels/portable/cpu/util/test/targets.bzl | 23 + kernels/portable/op_registration_util.bzl | 165 ++++ kernels/portable/targets.bzl | 119 +++ kernels/portable/test/targets.bzl | 12 + kernels/quantized/cpu/targets.bzl | 43 + kernels/quantized/targets.bzl | 47 + kernels/quantized/test/targets.bzl | 27 + kernels/targets.bzl | 47 + .../test/custom_kernel_example/targets.bzl | 50 ++ kernels/test/custom_kernel_example/tests.bzl | 28 + kernels/test/targets.bzl | 276 ++++++ kernels/test/util.bzl | 141 +++ platform/targets.bzl | 48 + platform/test/targets.bzl | 60 ++ profiler/profiler.bzl | 22 + profiler/targets.bzl | 37 + pybindings/targets.bzl | 89 ++ pytree/targets.bzl | 22 + schema/targets.bzl | 144 +++ schema/test/targets.bzl | 18 + sdk/etdump/targets.bzl | 171 ++++ sdk/etdump/tests/targets.bzl | 21 + test/models/targets.bzl | 148 ++++ test/targets.bzl | 129 +++ test/utils/targets.bzl | 45 + threadpool/targets.bzl | 43 + threadpool/test/targets.bzl | 18 + util/targets.bzl | 213 +++++ util/test/targets.bzl | 121 +++ 68 files changed, 6352 insertions(+) create mode 100644 backends/backends.bzl create mode 100644 backends/qnnpack/targets.bzl create mode 100644 backends/qnnpack/test/targets.bzl create mode 100644 backends/targets.bzl create mode 100644 backends/test/demos/rpc/targets.bzl create mode 100644 backends/xnnpack/targets.bzl create mode 100644 build/runtime_wrapper.bzl create mode 100644 codegen/codegen.bzl create mode 100644 codegen/targets.bzl create mode 100644 codegen/tools/targets.bzl create mode 100644 compiler/targets.bzl create mode 100644 configurations/targets.bzl create mode 100644 core/kernel_types/lean/targets.bzl create mode 100644 core/kernel_types/lean/test/targets.bzl create mode 100644 core/kernel_types/targets.bzl create mode 100644 
core/kernel_types/test/targets.bzl create mode 100644 core/kernel_types/testing/targets.bzl create mode 100644 core/kernel_types/util/targets.bzl create mode 100644 core/kernel_types/util/test/targets.bzl create mode 100644 core/prim_ops/targets.bzl create mode 100644 core/prim_ops/test/targets.bzl create mode 100644 core/targets.bzl create mode 100644 core/test/targets.bzl create mode 100644 core/values/targets.bzl create mode 100644 core/values/test/targets.bzl create mode 100644 executor/targets.bzl create mode 100644 executor/test/targets.bzl create mode 100644 exir/targets.bzl create mode 100644 kernels/aten/targets.bzl create mode 100644 kernels/aten/test/targets.bzl create mode 100644 kernels/optimized/cpu/targets.bzl create mode 100644 kernels/optimized/lib_defs.bzl create mode 100644 kernels/optimized/op_registration_util.bzl create mode 100644 kernels/optimized/targets.bzl create mode 100644 kernels/optimized/test/targets.bzl create mode 100644 kernels/portable/cpu/pattern/targets.bzl create mode 100644 kernels/portable/cpu/targets.bzl create mode 100644 kernels/portable/cpu/test/targets.bzl create mode 100644 kernels/portable/cpu/util/targets.bzl create mode 100644 kernels/portable/cpu/util/test/targets.bzl create mode 100644 kernels/portable/op_registration_util.bzl create mode 100644 kernels/portable/targets.bzl create mode 100644 kernels/portable/test/targets.bzl create mode 100644 kernels/quantized/cpu/targets.bzl create mode 100644 kernels/quantized/targets.bzl create mode 100644 kernels/quantized/test/targets.bzl create mode 100644 kernels/targets.bzl create mode 100644 kernels/test/custom_kernel_example/targets.bzl create mode 100644 kernels/test/custom_kernel_example/tests.bzl create mode 100644 kernels/test/targets.bzl create mode 100644 kernels/test/util.bzl create mode 100644 platform/targets.bzl create mode 100644 platform/test/targets.bzl create mode 100644 profiler/profiler.bzl create mode 100644 profiler/targets.bzl create mode 100644 pybindings/targets.bzl create mode 100644 pytree/targets.bzl create mode 100644 schema/targets.bzl create mode 100644 schema/test/targets.bzl create mode 100644 sdk/etdump/targets.bzl create mode 100644 sdk/etdump/tests/targets.bzl create mode 100644 test/models/targets.bzl create mode 100644 test/targets.bzl create mode 100644 test/utils/targets.bzl create mode 100644 threadpool/targets.bzl create mode 100644 threadpool/test/targets.bzl create mode 100644 util/targets.bzl create mode 100644 util/test/targets.bzl diff --git a/backends/backends.bzl b/backends/backends.bzl new file mode 100644 index 00000000000..7d8d9b6cae3 --- /dev/null +++ b/backends/backends.bzl @@ -0,0 +1,23 @@ +def get_all_cpu_backend_targets(): + """Returns a list of all CPU backend targets. + + For experimenting and testing, not for production, since it will typically + include more than necessary for a particular product. + """ + return [ + "//executorch/backends/xnnpack:xnnpack_backend", + "//executorch/backends/qnnpack:qnnpack_backend", + ] + +def get_all_cpu_aot_and_backend_targets(): + """Returns a list of all CPU backend targets with aot (ahead of time). + + For experimenting and testing, not for production, since it will typically + include more than necessary for a particular product. 
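+
+    The runtime half of this list comes from get_all_cpu_backend_targets();
+    a client library can wire those in directly. A sketch, patterned on the
+    executor_cpu_optimized target in configurations/targets.bzl later in
+    this patch (the target name here is illustrative):
+
+        runtime.cxx_library(
+            name = "executor_all_cpu_backends",
+            exported_deps = get_all_cpu_backend_targets(),
+        )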
+ """ + return [ + "//executorch/backends/xnnpack:xnnpack_preprocess", + "//executorch/backends/xnnpack/partition:xnnpack_partitioner", + "//executorch/backends/qnnpack:qnnpack_preprocess", + "//executorch/backends/qnnpack/partition:qnnpack_partitioner", + ] + get_all_cpu_backend_targets() diff --git a/backends/qnnpack/targets.bzl b/backends/qnnpack/targets.bzl new file mode 100644 index 00000000000..3d25bc29061 --- /dev/null +++ b/backends/qnnpack/targets.bzl @@ -0,0 +1,104 @@ +load( + "@fbsource//tools/build_defs:default_platform_defs.bzl", + "ANDROID", + "APPLE", + "CXX", +) +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + runtime.genrule( + name = "gen_qnnpack_schema", + srcs = [ + "serialization/schema.fbs", + ], + # We're only generating a single file, so it seems like we could use + # `out`, but `flatc` takes a directory as a parameter, not a single + # file. Use `outs` so that `${OUT}` is expanded as the containing + # directory instead of the file itself. + outs = { + "qnnpack_schema_generated.h": ["schema_generated.h"], + }, + cmd = " ".join([ + "$(exe fbsource//third-party/flatbuffers/fbsource_namespace:flatc)", + "--cpp", + "--cpp-std c++11", + "--scoped-enums", + "-o ${OUT}", + "${SRCS}", + ]), + default_outs = ["."], + ) + + runtime.cxx_library( + name = "qnnpack_schema", + srcs = [], + exported_headers = { + "qnnpack_schema_generated.h": ":gen_qnnpack_schema[qnnpack_schema_generated.h]", + }, + exported_deps = [ + "fbsource//third-party/flatbuffers/fbsource_namespace:flatbuffers-api", + ], + ) + + for aten_mode in (True, False): + aten_suffix = "_aten" if aten_mode else "" + runtime.cxx_library( + name = "qnnpack_utils" + aten_suffix, + srcs = [ + "utils/utils.cpp", + ], + exported_headers = ["utils/utils.h"], + deps = [ + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + "//executorch/backends:backend", + ], + visibility = [ + "//executorch/backends/qnnpack/test/...", + "//executorch/backends/xnnpack/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.cxx_library( + name = "qnnpack_backend", + srcs = [ + "QNNPackBackend.cpp", + ], + headers = [ + "executor/QNNExecutor.h", + ], + resources = [ + "serialization/schema.fbs", + ], + visibility = [ + "//executorch/backends:backend_lib", + "//executorch/backends/qnnpack/test/...", + "//executorch/backends/test/...", + "//executorch/pybindings/...", + "@EXECUTORCH_CLIENTS", + ], + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/backends:backend", + "//executorch/threadpool:threadpool", + "//executorch/util:memory_utils", + ":qnnpack_schema", + ":qnnpack_utils", + ], + platforms = [ + ANDROID, + APPLE, + CXX, + ], + fbcode_deps = [ + "//caffe2/aten/src/ATen/native/quantized/cpu/qnnpack:pytorch_qnnpack", + ], + xplat_deps = [ + "//xplat/caffe2/aten/src/ATen/native/quantized/cpu/qnnpack:pytorch_qnnpack", + ], + # XnnpackBackend.cpp needs to compile with executor as whole + # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole) + link_whole = True, + ) diff --git a/backends/qnnpack/test/targets.bzl b/backends/qnnpack/test/targets.bzl new file mode 100644 index 00000000000..ac3080fcd73 --- /dev/null +++ b/backends/qnnpack/test/targets.bzl @@ -0,0 +1,26 @@ +load( + "@fbsource//tools/build_defs:default_platform_defs.bzl", + "ANDROID", + "APPLE", + "CXX", +) +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def 
define_common_targets(): + runtime.cxx_test( + name = "qnnpack_utils_test", + srcs = ["test_utils.cpp"], + fbcode_deps = [ + "//caffe2:ATen-cpu", + ], + xplat_deps = [ + "//caffe2:aten_cpu", + ], + platforms = [ANDROID, APPLE, CXX], + deps = [ + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/util:aten_bridge", + "//executorch/backends/qnnpack:qnnpack_utils", + ], + ) diff --git a/backends/targets.bzl b/backends/targets.bzl new file mode 100644 index 00000000000..da2ed026fed --- /dev/null +++ b/backends/targets.bzl @@ -0,0 +1,33 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + for aten_mode in (True, False): + aten_suffix = ("_aten" if aten_mode else "") + runtime.cxx_library( + name = "backend" + aten_suffix, + srcs = [ + "backend.cpp", + ], + exported_headers = [ + "backend.h", + ], + preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + visibility = [ + "//executorch/...", + "//executorch/test/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/core:core", + "//executorch/core/values:executor_values" + aten_suffix, + "//executorch/core:freeable_buffer", + "//executorch/executor:memory_manager", + "//executorch/profiler:profiler", + ], + ) diff --git a/backends/test/demos/rpc/targets.bzl b/backends/test/demos/rpc/targets.bzl new file mode 100644 index 00000000000..2ee46b0472e --- /dev/null +++ b/backends/test/demos/rpc/targets.bzl @@ -0,0 +1,48 @@ +load( + "@fbsource//tools/build_defs:default_platform_defs.bzl", + "ANDROID", + "CXX", +) +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/pybindings:targets.bzl", "MODELS_ALL_OPS_LEAN_MODE_GENERATED_LIB") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "executor_backend", + srcs = [ + "ExecutorBackend.cpp", + ], + exported_headers = [ + "ExecutorBackend.h", + ], + platforms = [ANDROID, CXX], + deps = [ + "//executorch/executor:executor", + "//executorch/kernels/portable:generated_lib", + "//executorch/backends:backend", + "//executorch/util:embedded_data_loader", + "//executorch/util:util", + ] + MODELS_ALL_OPS_LEAN_MODE_GENERATED_LIB, + exported_deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_library( + name = "executor_backend_register", + srcs = [ + "ExecutorBackendRegister.cpp", + ], + deps = [ + ":executor_backend", + "//executorch/backends:backend", + "//executorch/core:core", + ], + platforms = [ANDROID, CXX], + ) diff --git a/backends/xnnpack/targets.bzl b/backends/xnnpack/targets.bzl new file mode 100644 index 00000000000..b48131a554d --- /dev/null +++ b/backends/xnnpack/targets.bzl @@ -0,0 +1,65 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + runtime.genrule( + name = "gen_xnnpack_schema", + srcs = [ + "serialization/schema.fbs", + ], + # We're only generating a single file, so it seems like we could use + # `out`, but `flatc` takes a directory as a parameter, not a single + # file. 
Use `outs` so that `${OUT}` is expanded as the containing + # directory instead of the file itself. + outs = { + "xnnpack_schema_generated.h": ["schema_generated.h"], + }, + cmd = " ".join([ + "$(exe fbsource//third-party/flatbuffers/fbsource_namespace:flatc)", + "--cpp", + "--cpp-std c++11", + "--scoped-enums", + "-o ${OUT}", + "${SRCS}", + ]), + default_outs = ["."], + ) + + runtime.cxx_library( + name = "xnnpack_schema", + srcs = [], + exported_headers = { + "xnnpack_schema_generated.h": ":gen_xnnpack_schema[xnnpack_schema_generated.h]", + }, + exported_deps = [ + "fbsource//third-party/flatbuffers/fbsource_namespace:flatbuffers-api", + ], + ) + + runtime.cxx_library( + name = "xnnpack_backend", + srcs = native.glob([ + "runtime/*.cpp", + ]), + headers = native.glob([ + "runtime/*.h", + ]), + visibility = [ + "//executorch/backends:backend_lib", + "//executorch/backends/test/...", + "//executorch/backends/xnnpack/test/...", + "//executorch/pybindings/...", + "@EXECUTORCH_CLIENTS", + ], + deps = [ + "//xplat/third-party/XNNPACK:XNNPACK", + ":xnnpack_schema", + "//executorch/backends:backend", + "//executorch/backends/qnnpack:qnnpack_utils", # TODO Use (1) portable for choose_qparams(), (2) xnnpack for quantize_per_tensor() + "//executorch/threadpool:threadpool", + "//executorch/util:memory_utils", + "//executorch/core/kernel_types/util:tensor_util", + ], + # XnnpackBackend.cpp needs to compile with executor as whole + # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole) + link_whole = True, + ) diff --git a/build/runtime_wrapper.bzl b/build/runtime_wrapper.bzl new file mode 100644 index 00000000000..9baa95b3ce7 --- /dev/null +++ b/build/runtime_wrapper.bzl @@ -0,0 +1,634 @@ +"""Common macros to build Executorch runtime targets in both fbcode and xplat. + +For directories that contain code which must be built for both fbcode and xplat, +the expected pattern is to create: + +- A `targets.bzl` file that uses the macros in this file + (`runtime_wrapper.bzl`) to define a function named `define_common_targets()`. + This function should define all of the build targets in terms of rules in the + `runtime` struct below. + + The `targets.bzl` file must load this file from xplat (not fbcode), like + load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + to avoid a problematic dependency from xplat -> fbcode when building in xplat. + +- A TARGETS file and a BUILD file which both contain: + + load(":targets.bzl", "define_common_targets") + define_common_targets() + +If a given directory also needs to define a fbcode-only build target as well +as the common targets, it should define that rule directly in the TARGETS file +below the call to `define_common_targets()`. Similar for xplat-only build +targets and BUCK files. + +Note that fbcode-only directories do not need to use these wrappers, and can +use TARGETS files normally. Same for xplat-only directories and BUCK files. 
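+
+A minimal `targets.bzl` following this pattern might look like (an
+illustrative sketch; the target and file names are hypothetical):
+
+    load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+    def define_common_targets():
+        runtime.cxx_library(
+            name = "my_lib",
+            srcs = ["my_lib.cpp"],
+            exported_headers = ["my_lib.h"],
+            visibility = ["//executorch/..."],
+        )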
+""" + +load("@fbcode_macros//build_defs:cpp_binary.bzl", "cpp_binary") +load("@fbcode_macros//build_defs:cpp_library.bzl", "cpp_library") +load("@fbcode_macros//build_defs:cpp_python_extension.bzl", "cpp_python_extension") +load("@fbcode_macros//build_defs:cpp_unittest.bzl", "cpp_unittest") +load("@fbcode_macros//build_defs:export_files.bzl", "export_file") +load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup", "buck_genrule") +load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary") +load("@fbcode_macros//build_defs:python_library.bzl", "python_library") +load("@fbcode_macros//build_defs:python_unittest.bzl", "python_unittest") +load("@fbsource//tools/build_defs:cell_defs.bzl", "get_fbsource_cell") +load( + "@fbsource//tools/build_defs:default_platform_defs.bzl", + "ANDROID", + "APPLE", + "CXX", + "IOS", + "MACOSX", +) +load("@fbsource//tools/build_defs:fb_cxx_python_extension.bzl", "fb_cxx_python_extension") +load("@fbsource//tools/build_defs:fb_native_wrapper.bzl", "fb_native") +load("@fbsource//tools/build_defs:fb_python_binary.bzl", "fb_python_binary") +load("@fbsource//tools/build_defs:fb_python_library.bzl", "fb_python_library") +load("@fbsource//tools/build_defs:fb_python_test.bzl", "fb_python_test") +load("@fbsource//tools/build_defs:fb_xplat_cxx_binary.bzl", "fb_xplat_cxx_binary") +load("@fbsource//tools/build_defs:fb_xplat_cxx_library.bzl", "fb_xplat_cxx_library") +load("@fbsource//tools/build_defs:fb_xplat_cxx_test.bzl", "fb_xplat_cxx_test") +load("@fbsource//tools/build_defs:fbsource_utils.bzl", "is_fbcode", "is_xplat") +load("@fbsource//tools/build_defs:selects.bzl", "selects") +load("@fbsource//tools/build_defs:type_defs.bzl", "is_dict", "is_list", "is_string", "is_tuple", "is_unicode") +load("//tools/build_defs:fb_xplat_genrule.bzl", "fb_xplat_genrule") +load(":clients.bzl", "EXECUTORCH_CLIENTS") + +# Platforms that we currently support builds for +_DEFAULT_PLATFORMS = (CXX, ANDROID, APPLE) + +_DEFAULT_APPLE_SDKS = (IOS, MACOSX) + +# Root directories in fbcode that we need to convert to //xplat paths. +_ET_TARGET_PREFIXES = ("executorch", "pye", "caffe2") + +def get_default_executorch_platforms(): + return _DEFAULT_PLATFORMS + +def _xplat_coerce_platforms(platforms): + """Make sure `platforms` is a subset of _DEFAULT_PLATFORMS. If `platforms` + is None, returns _DEFAULT_PLATFORMS. + + When building for xplat, platforms can be CXX, FBCODE, ANDROID, etc. But + the only platforms ExecuTorch support currently is _DEFAULT_PLATFORMS. + + Args: + platforms: The xplat platforms from https://fburl.com/code/fm8nq6k0 + + Returns: + The possibly-modified platforms that ExecuTorch supports. + """ + if platforms != None: + # if platforms is just a str/unicode, turn it into a list + if is_string(platforms) or is_unicode(platforms): + platforms = [platforms] + + if not is_tuple(platforms) and not is_list(platforms): + fail("Unsupported platforms of type {}".format(type(platforms))) + + for platform in platforms: + if platform not in _DEFAULT_PLATFORMS: + fail("Only {} are supported; got {} instead".format( + _DEFAULT_PLATFORMS, + platforms, + )) + + # if platforms is provided and it's a subset of _DEFAULT_PLATFORMS, then + # it's okay to use it. Just return it. + return platforms + + # if platforms is not provided, use the _DEFAULT_PLATFORMS + return _DEFAULT_PLATFORMS + +def _xplat_coerce_apple_sdks(platforms, apple_sdks): + """Make sure `apple_sdks` is a subset of _DEFAULT_APPLE_SDKS. 
If `apple_sdks`
+    is None and platforms contains APPLE, returns _DEFAULT_APPLE_SDKS.
+
+    When building for APPLE, apple_sdks can be IOS, MACOSX, APPLETVOS, etc. But
+    the only SDKs ExecuTorch currently supports are those in _DEFAULT_APPLE_SDKS.
+
+    Args:
+        platforms: The platforms for the rule we are adding apple_sdks to
+        apple_sdks: The apple sdks from https://fburl.com/code/n38zqdsh
+
+    Returns:
+        The possibly-modified apple_sdks that ExecuTorch supports.
+    """
+    if apple_sdks != None:
+        if APPLE not in platforms:
+            fail("apple_sdks can only be specified if APPLE is in platforms, instead found {}".format(
+                platforms,
+            ))
+
+        # if apple_sdks is just a str/unicode, turn it into a list
+        if is_string(apple_sdks) or is_unicode(apple_sdks):
+            apple_sdks = [apple_sdks]
+
+        if not is_tuple(apple_sdks) and not is_list(apple_sdks):
+            fail("Unsupported apple_sdks of type {}".format(type(apple_sdks)))
+
+        for sdk in apple_sdks:
+            if sdk not in _DEFAULT_APPLE_SDKS:
+                fail("Only {} are supported; got {} instead".format(
+                    _DEFAULT_APPLE_SDKS,
+                    apple_sdks,
+                ))
+
+        # if apple_sdks is provided and it's a subset of _DEFAULT_APPLE_SDKS, then
+        # it's okay to use it. Just return it.
+        return apple_sdks
+
+    # if apple_sdks is not provided, use the _DEFAULT_APPLE_SDKS
+    return _DEFAULT_APPLE_SDKS if APPLE in platforms else []
+
+def _patch_platforms(kwargs):
+    """Patches platforms and apple_sdks in kwargs based on is_xplat() or is_fbcode()
+
+    platforms and apple_sdks are only supported when building in xplat, not in fbcode. This calls
+    _xplat_coerce_platforms for xplat and removes `platforms` and `apple_sdks` for fbcode.
+
+    Args:
+        kwargs: The `kwargs` parameter from a rule.
+
+    Returns:
+        The possibly-modified `kwargs` parameter.
+    """
+    if is_xplat():
+        kwargs["platforms"] = _xplat_coerce_platforms(kwargs.get("platforms", None))
+        kwargs["apple_sdks"] = _xplat_coerce_apple_sdks(kwargs.get("platforms"), kwargs.get("apple_sdks", None))
+    elif is_fbcode():
+        if "platforms" in kwargs:
+            kwargs.pop("platforms")
+        if "apple_sdks" in kwargs:
+            kwargs.pop("apple_sdks")
+    else:
+        _fail_unknown_environment()
+    return kwargs
+
+def _remove_platform_specific_args(kwargs):
+    """Removes platform-specific arguments for fbcode builds
+
+    Args such as *_platform_preprocessor_flags and *_platform_deps are not
+    supported by fbcode builds. Remove them from kwargs if building for fbcode.
+
+    Args:
+        kwargs: The `kwargs` parameter from a rule.
+
+    Returns:
+        The possibly-modified `kwargs` parameter.
+    """
+    if is_fbcode():
+        # Platform based Preprocessor flags
+        if "cxx_platform_preprocessor_flags" in kwargs:
+            kwargs.pop("cxx_platform_preprocessor_flags")
+        if "fbandroid_platform_preprocessor_flags" in kwargs:
+            kwargs.pop("fbandroid_platform_preprocessor_flags")
+
+        # Platform based dependencies
+        if "cxx_platform_deps" in kwargs:
+            kwargs.pop("cxx_platform_deps")
+        if "fbandroid_platform_deps" in kwargs:
+            kwargs.pop("fbandroid_platform_deps")
+    return kwargs
+
+def _start_with_et_targets(target):
+    for prefix in _ET_TARGET_PREFIXES:
+        prefix = "//" + prefix
+        for suffix in ("/", ":"):
+            if target.startswith(prefix + suffix):
+                return True
+    return False
+
+def _fail_unknown_environment():
+    fail("Only fbcode and xplat are supported; saw \"{}//{}\"".format(
+        get_fbsource_cell(),
+        native.package_name(),
+    ))
+
+def _patch_executorch_references(targets, use_static_deps = False):
+    """Patches up references to "//executorch/..." in lists of build targets.
+
+    References to targets under `executorch` (in
+    deps/exported_deps/visibility/etc.) 
must be specified as `//executorch/...` + in the targets.bzl file. When building for xplat, rewrite them as + `//xplat/executorch/...`. + + Args: + targets: A list of build target strings to fix up. Not modified in + place. + use_static_deps: Whether this target should depend on static executorch + targets when building in xplat. + + Returns: + The possibly-different list of targets. + """ + if not targets: + return targets + out_targets = [] + for target in targets: + if target.startswith("//xplat/executorch"): + fail("References to executorch build targets must use " + + "`//executorch`, not `//xplat/executorch`") + if is_xplat(): + if _start_with_et_targets(target): + target = target.replace("//", "//xplat/", 1) + if use_static_deps and not target.endswith("..."): + target = target + "_static" + elif use_static_deps and target.startswith(":"): + target = target + "_static" + out_targets.append(target) + return out_targets + +def _patch_executorch_genrule_cmd(cmd, macros_only = True): + """Patches references to //executorch in genrule commands. + + When building for xplat, rewrites substrings like `//executorch/` or + `//executorch:` and replaces them with `//xplat/executorch[/:]`. + + If `macros_only` is True, only rewrites substrings in `$(exe)` or + `$(location)` macros. + + Args: + cmd: The `cmd` string from a genrule. + macros_only: Only modify strings inside certain `$()` macros. + + Returns: + The possibly-modified command. + """ + if not cmd: + return cmd + if is_xplat(): + if macros_only: + # Replace all macro references in the command. This is fragile + # because it assumes that there is exactly one space character + # between the macro name and the path, but it's easier to fix the + # input than to add complexity here. + for macro in ("location", "exe"): + for c in (":", "/"): + for prefix in _ET_TARGET_PREFIXES: + cmd = cmd.replace( + "$({macro} //{prefix}{c}".format( + macro = macro, + prefix = prefix, + c = c, + ), + "$({macro} //xplat/{prefix}{c}".format( + macro = macro, + prefix = prefix, + c = c, + ), + ) + else: + # Replace all references, even outside of macros. + for c in (":", "/"): + for prefix in _ET_TARGET_PREFIXES: + cmd = cmd.replace( + "//{prefix}{c}".format(prefix = prefix, c = c), + "//xplat/{prefix}{c}".format(prefix = prefix, c = c), + ) + return cmd + +def _patch_build_mode_flags(kwargs): + """Applies modifications to the `compiler_flags` kwargs based on build mode. + + Args: + kwargs: The `kwargs` parameter from a rule. + + Returns: + The possibly-modified `kwargs` parameter for chaining. + """ + build_mode = native.read_config("fbcode", "build_mode_test_label", "") + flags = [] + + # Base build modes. + if build_mode.startswith("dev"): + flags.append("-D__ET_BUILD_MODE_DEV=1") + elif build_mode.startswith("opt"): + flags.append("-D__ET_BUILD_MODE_OPT=1") + elif build_mode.startswith("dbgo"): + flags.append("-D__ET_BUILD_MODE_DBGO=1") + elif build_mode.startswith("dbg"): + flags.append("-D__ET_BUILD_MODE_DBG=1") + + # Build mode extensions. 
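+    # (Illustrative worked example: a build mode labeled "dev-asan" starts
+    # with "dev" and contains "-asan", so it yields both
+    # -D__ET_BUILD_MODE_DEV=1 and -D__ET_BUILD_MODE_ASAN=1.)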
+ if "-cov" in build_mode: + flags.append("-D__ET_BUILD_MODE_COV=1") + elif "-asan" in build_mode: + flags.append("-D__ET_BUILD_MODE_ASAN=1") + elif "-tsan" in build_mode: + flags.append("-D__ET_BUILD_MODE_TSAN=1") + elif "-ubsan" in build_mode: + flags.append("-D__ET_BUILD_MODE_UBSAN=1") + elif "-lto" in build_mode: + flags.append("-D__ET_BUILD_MODE_LTO=1") + + if "compiler_flags" not in kwargs: + kwargs["compiler_flags"] = [] + + # kwargs["compiler_flags"].extend(flags) or kwargs["compiler_flags"] += would + # fail if kwargs["compiler_flags"] is Immutable (ex: the default argument of + # a Buck macro) + kwargs["compiler_flags"] = kwargs["compiler_flags"] + flags + + return kwargs + +def _patch_platform_build_mode_flags(kwargs): + # aten mode must be built with support for exceptions on ios and android. + flags = [] + if "_aten" or "aten_" in kwargs["name"]: + flags.append("-D__ET_ATEN=1") + if "fbandroid_compiler_flags" not in kwargs: + kwargs["fbandroid_compiler_flags"] = [] + if "fbobjc_compiler_flags" not in kwargs: + kwargs["fbobjc_compiler_flags"] = [] + if "fbobjc_macosx_compiler_flags" not in kwargs: + kwargs["fbobjc_macosx_compiler_flags"] = [] + ios_android_flags = ["-fexceptions"] + kwargs["fbandroid_compiler_flags"].extend(ios_android_flags) + kwargs["fbobjc_compiler_flags"].extend(ios_android_flags) + kwargs["fbobjc_macosx_compiler_flags"].extend(ios_android_flags) + return kwargs + +def _patch_test_compiler_flags(kwargs): + if "compiler_flags" not in kwargs: + kwargs["compiler_flags"] = [] + + # Relaxing some constraints for tests + kwargs["compiler_flags"].extend(["-Wno-missing-prototypes", "-Wno-unused-variable", "-Wno-error"]) + return kwargs + +def _patch_kwargs_common(kwargs): + """Applies modifications to kwargs for all rule types. + + Returns the possibly-modified `kwargs` parameter for chaining. + """ + + # Be careful about dependencies on executorch targets for now, so that we + # don't pick up unexpected clients while things are still in flux. + if not kwargs.pop("_is_external_target", False): + for target in kwargs.get("visibility", []): + if not (target.startswith("//executorch") or target.startswith("@")): + fail("Please manage all external visibility using the " + + "EXECUTORCH_CLIENTS list in //executorch/build/clients.bzl. " + + "Found external visibility target \"{}\".".format(target)) + else: + kwargs.pop("_is_external_target", None) + + if is_xplat(): + # xplat doesn't support external_deps and exported_external_deps + if "external_deps" in kwargs: + kwargs.pop("external_deps") + if "exported_external_deps" in kwargs: + kwargs.pop("exported_external_deps") + + # Append repo-specific preprocessor_flags. + for pp_type in ("preprocessor_flags", "exported_preprocessor_flags"): + if is_xplat(): + extra_pp_flags = kwargs.pop("xplat_" + pp_type, []) + kwargs.pop("fbcode_" + pp_type, None) # Also remove the other one. + elif is_fbcode(): + extra_pp_flags = kwargs.pop("fbcode_" + pp_type, []) + kwargs.pop("xplat_" + pp_type, None) # Also remove the other one. + else: + extra_pp_flags = [] # Silence a not-initialized warning. + _fail_unknown_environment() + if extra_pp_flags: + # This should work even with select() elements. + kwargs[pp_type] = kwargs.get(pp_type, []) + extra_pp_flags + + # Append repo-specific deps. + for dep_type in ("deps", "exported_deps"): + if is_xplat(): + extra_deps = kwargs.pop("xplat_" + dep_type, []) + kwargs.pop("fbcode_" + dep_type, None) # Also remove the other one. 
+        elif is_fbcode():
+            extra_deps = kwargs.pop("fbcode_" + dep_type, [])
+            kwargs.pop("xplat_" + dep_type, None)  # Also remove the other one.
+        else:
+            extra_deps = []  # Silence a not-initialized warning.
+            _fail_unknown_environment()
+        if extra_deps:
+            # This should work even with select() elements.
+            kwargs[dep_type] = kwargs.get(dep_type, []) + extra_deps
+
+    # Patch up references to "//executorch/..." in lists of build targets,
+    # if necessary.
+    use_static_deps = kwargs.pop("use_static_deps", False)
+    for dep_type in ("deps", "exported_deps", "visibility"):
+        if kwargs.get(dep_type):
+            # deps may contain select() elements, dicts that map names to lists
+            # of targets. selects.apply() will run the provided function on all
+            # lists of targets in the provided object, but can also handle a
+            # simple list. See also
+            # https://www.internalfb.com/intern/qa/152401/what-is-a-select-in-buck
+            kwargs[dep_type] = selects.apply(
+                obj = kwargs.get(dep_type),
+                function = native.partial(_patch_executorch_references, use_static_deps = use_static_deps),
+            )
+
+    # Make all targets private by default, like in xplat.
+    if "visibility" not in kwargs:
+        kwargs["visibility"] = []
+
+    # If we see certain strings in the "visibility" list, expand them.
+    if "@EXECUTORCH_CLIENTS" in kwargs["visibility"]:
+        # See clients.bzl for this list.
+        kwargs["visibility"].remove("@EXECUTORCH_CLIENTS")
+        kwargs["visibility"].extend(EXECUTORCH_CLIENTS)
+
+    return kwargs
+
+def _patch_kwargs_cxx(kwargs):
+    _patch_platforms(kwargs)
+    _remove_platform_specific_args(kwargs)
+    return _patch_kwargs_common(kwargs)
+
+def _cxx_library_common(*args, **kwargs):
+    _patch_kwargs_cxx(kwargs)
+    _patch_build_mode_flags(kwargs)
+
+    if is_xplat():
+        _patch_platform_build_mode_flags(kwargs)
+        fb_xplat_cxx_library(*args, **kwargs)
+    elif is_fbcode():
+        # fbcode doesn't support `exported_headers`; squash everything into
+        # `headers`.
+        if "exported_headers" in kwargs:
+            exported_headers = kwargs.pop("exported_headers")
+
+            # Note that this doesn't handle the case where one is a dict
+            # and the other is a list.
+            if is_dict(exported_headers):
+                headers = {}
+                headers.update(exported_headers)
+                headers.update(kwargs.get("headers", {}))
+                kwargs["headers"] = headers
+            elif is_list(exported_headers):
+                kwargs["headers"] = (
+                    exported_headers + kwargs.get("headers", [])
+                )
+            else:
+                fail("Unhandled exported_headers type '{}'"
+                    .format(type(exported_headers)))
+        if "exported_preprocessor_flags" in kwargs:
+            kwargs["propagated_pp_flags"] = kwargs.pop(
+                "exported_preprocessor_flags",
+            )
+
+        # fbcode doesn't support private dependencies.
+        if "reexport_all_header_dependencies" in kwargs:
+            kwargs.pop("reexport_all_header_dependencies")
+        cpp_library(*args, **kwargs)
+    else:
+        _fail_unknown_environment()
+
+def _cxx_library(*args, **kwargs):
+    define_static_target = kwargs.pop("define_static_target", True)
+
+    # Determine linkage for this library based on its children.
+    kwargs["preferred_linkage"] = "any"
+    _cxx_library_common(*args, **kwargs)
+
+    # Optionally add a statically linked library target. 
+ if define_static_target: + kwargs["name"] += "_static" + kwargs["preferred_linkage"] = "static" + kwargs["use_static_deps"] = True + _cxx_library_common(*args, **kwargs) + +def _cxx_binary_helper(*args, **kwargs): + _patch_kwargs_cxx(kwargs) + _patch_build_mode_flags(kwargs) + if is_xplat(): + _patch_platform_build_mode_flags(kwargs) + fb_xplat_cxx_binary(*args, **kwargs) + elif is_fbcode(): + cpp_binary(*args, **kwargs) + else: + _fail_unknown_environment() + +def _cxx_binary(*args, **kwargs): + define_static_target = kwargs.pop("define_static_target", True) + _cxx_binary_helper(*args, **kwargs) + if define_static_target: + kwargs["name"] += "_static" + kwargs["use_static_deps"] = True + _cxx_binary_helper(*args, **kwargs) + +def _cxx_test(*args, **kwargs): + # Inject test utils library. + if "deps" not in kwargs: + kwargs["deps"] = [] + kwargs["deps"].append("//executorch/test/utils:utils") + + _patch_kwargs_cxx(kwargs) + _patch_build_mode_flags(kwargs) + _patch_test_compiler_flags(kwargs) + + if is_xplat(): + _patch_platform_build_mode_flags(kwargs) + fb_xplat_cxx_test(*args, **kwargs) + elif is_fbcode(): + cpp_unittest(*args, **kwargs) + else: + _fail_unknown_environment() + +def _cxx_python_extension(*args, **kwargs): + _patch_kwargs_common(kwargs) + if is_xplat(): + fb_cxx_python_extension(*args, **kwargs) + elif is_fbcode(): + cpp_python_extension(*args, **kwargs) + else: + _fail_unknown_environment() + +def _export_file(*args, **kwargs): + _patch_kwargs_common(kwargs) + if is_xplat(): + # @lint-ignore BUCKLINT: fb_native is not allowed in fbcode. + fb_native.export_file(*args, **kwargs) + elif is_fbcode(): + export_file(*args, **kwargs) + else: + _fail_unknown_environment() + +def _filegroup(*args, **kwargs): + _patch_kwargs_common(kwargs) + if is_xplat(): + # @lint-ignore BUCKLINT: fb_native is not allowed in fbcode. + fb_native.filegroup(*args, **kwargs) + elif is_fbcode(): + buck_filegroup(*args, **kwargs) + else: + _fail_unknown_environment() + +def _genrule(*args, **kwargs): + _patch_kwargs_common(kwargs) + _patch_platforms(kwargs) + if kwargs.get("cmd"): + kwargs["cmd"] = _patch_executorch_genrule_cmd( + kwargs.get("cmd"), + kwargs.pop("macros_only", True), + ) + + # Really no difference between static and non-static in genrule, + # only to satisfy static build requirement. This is bad but works for now. 
+ define_static_target = kwargs.pop("define_static_target", True) + if is_xplat(): + fb_xplat_genrule(*args, **kwargs) + elif is_fbcode(): + buck_genrule(*args, **kwargs) + if define_static_target: + kwargs["name"] += "_static" + if is_xplat(): + fb_xplat_genrule(*args, **kwargs) + elif is_fbcode(): + buck_genrule(*args, **kwargs) + else: + _fail_unknown_environment() + +def _python_library(*args, **kwargs): + _patch_kwargs_common(kwargs) + if is_xplat(): + fb_python_library(*args, **kwargs) + elif is_fbcode(): + python_library(*args, **kwargs) + else: + _fail_unknown_environment() + +def _python_binary(*args, **kwargs): + _patch_kwargs_common(kwargs) + if is_xplat(): + fb_python_binary(*args, **kwargs) + elif is_fbcode(): + python_binary(*args, **kwargs) + else: + _fail_unknown_environment() + +def _python_test(*args, **kwargs): + _patch_kwargs_common(kwargs) + if is_xplat(): + fb_python_test(*args, **kwargs) + elif is_fbcode(): + python_unittest(*args, **kwargs) + else: + _fail_unknown_environment() + +# Names in this struct should match the standard Buck rule names if possible: +# see the "Build Rules" section in the sidebar of +# https://buck.build/concept/build_rule.html. +runtime = struct( + cxx_binary = _cxx_binary, + cxx_library = _cxx_library, + cxx_python_extension = _cxx_python_extension, + cxx_test = _cxx_test, + export_file = _export_file, + filegroup = _filegroup, + genrule = _genrule, + python_binary = _python_binary, + python_library = _python_library, + python_test = _python_test, +) diff --git a/codegen/codegen.bzl b/codegen/codegen.bzl new file mode 100644 index 00000000000..e566262f83d --- /dev/null +++ b/codegen/codegen.bzl @@ -0,0 +1,443 @@ +load("@fbsource//tools/build_defs:fbsource_utils.bzl", "is_xplat") +load("@fbsource//xplat/caffe2:pt_ops.bzl", "pt_operator_library") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_default_executorch_platforms", "runtime") + +# Headers that declare the function signatures of the C++ functions that +# map to entries in functions.yaml and custom_ops.yaml. +OPERATOR_HEADERS = [ + # buildifier: keep sorted + "Functions.h", + "NativeFunctions.h", +] + +STATIC_DISPATCH_BACKENDS = [ + "CPU", +] + +# In ATen enabled mode, Functions.h will call into ATen/CPUFunctions.h and +# other ATen generated code. Hence we don't need to include the code generated +# by executorch codegen. +GENERATED_SOURCES = [ + # buildifier: keep sorted + "RegisterCodegenUnboxedKernelsEverything.cpp", +] + +# Fake kernels only return `out` or any other tensor from arguments +CUSTOM_OPS_DUMMY_KERNEL_SOURCES = ["Register{}Stub.cpp".format(backend) for backend in STATIC_DISPATCH_BACKENDS] + +CUSTOM_OPS_GENERATED_SOURCES = ["Register{}CustomOps.cpp".format(backend) for backend in STATIC_DISPATCH_BACKENDS] + +CUSTOM_OPS_NATIVE_FUNCTION_HEADER = ["CustomOpsNativeFunctions.h"] + +CUSTOM_OPS_SCHEMA_REGISTRATION_SOURCES = [ + "RegisterSchema.cpp", +] + +# Hide the dependency to caffe2 internally. 
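+#
+# Example invocation (an illustrative sketch; the target name and op are
+# hypothetical):
+#
+#     et_operator_library(
+#         name = "select_add_ops",
+#         ops = ["aten::add.out"],
+#     )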
+def et_operator_library(
+        name,
+        ops = [],
+        exported_deps = [],
+        model = None,
+        include_all_operators = False,
+        ops_schema_yaml_target = None,
+        define_static_targets = False,
+        **kwargs):
+    genrule_cmd = [
+        "$(exe //executorch/codegen/tools:gen_oplist)",
+        "--output_path=${OUT}",
+    ]
+    if ops_schema_yaml_target:
+        genrule_cmd.append(
+            "--ops_schema_yaml_path=$(location {})".format(ops_schema_yaml_target),
+        )
+    if ops:
+        genrule_cmd.append(
+            "--root_ops=" + ",".join(ops),
+        )
+    if model:
+        genrule_cmd.append(
+            "--model_file_path=$(location {})".format(model),
+        )
+    if ops_schema_yaml_target or ops or model:
+        runtime.genrule(
+            name = name,
+            macros_only = False,
+            cmd = " ".join(genrule_cmd),
+            out = "selected_operators.yaml",
+            labels = ["pt_operator_library"],
+            **kwargs
+        )
+    else:
+        kwargs["exported_deps"] = exported_deps
+        kwargs["include_all_operators"] = include_all_operators
+        pt_operator_library(
+            name = name,
+            **kwargs
+        )
+        if define_static_targets:
+            pt_operator_library(
+                name = name + "_static",
+                **kwargs
+            )
+
+def _get_headers(genrule_name, prefix = "", custom_op = None):
+    return {
+        prefix + f: ":{}[{}]".format(genrule_name, f)
+        for f in OPERATOR_HEADERS + (CUSTOM_OPS_NATIVE_FUNCTION_HEADER if custom_op else [])
+    }
+
+def _prepare_genrule_and_lib(
+        name,
+        functions_yaml_path = None,
+        custom_ops_yaml_path = None,
+        custom_ops_aten_kernel_deps = [],
+        custom_ops_requires_runtime_registration = True,
+        aten_mode = False):
+    """
+    This function returns two dicts `genrules` and `libs`, derived from the arguments being passed
+    to `executorch_generated_lib`. `genrules` contains all information related to what genrules to
+    run. The structure of it looks like this:
+    {
+        <genrule name>: {
+            "cmd": <genrule command>,
+            "outs": <list of generated files>,
+        },
+        <another genrule name>: {
+            "cmd": <genrule command>,
+            "outs": <list of generated files>,
+        },
+    }
+    For `libs`, similarly it contains information related to what cxx_library we will generate.
+    The structure looks like:
+    {
+        <library name>: {
+            "genrule": <genrule name>, # where to find the source files
+            "srcs": <list of source files>, # the source file names
+        },
+        <another library name>: {
+            "genrule": <genrule name>, # where to find the source files
+            "srcs": <list of source files>, # the source file names
+        },
+    }
+    """
+    genrule_cmd = [
+        "$(exe fbsource//xplat/caffe2/torchgen:gen_executorch)",
+        "--source-path=$(location //executorch/codegen:templates)",
+        "--tags-path $(location fbsource//xplat/caffe2:aten_src_path)/aten/src/ATen/native/tags.yaml",
+        "--aten_yaml_path $(location fbsource//xplat/caffe2:aten_src_path)/aten/src/ATen/native/native_functions.yaml",
+        "--install_dir=${OUT}",
+        # TODO(dbort): Add a second step that verifies that the set of
+        # actually-generated files matches GENERATED_FILES.
+    ]
+
+    # The command will always generate these files. 
+    genrule_outs = GENERATED_SOURCES + OPERATOR_HEADERS + (CUSTOM_OPS_NATIVE_FUNCTION_HEADER if custom_ops_yaml_path else [])
+
+    # Determine what sources the custom_ops_<name> target should include
+    custom_ops_sources = CUSTOM_OPS_SCHEMA_REGISTRATION_SOURCES + (
+        CUSTOM_OPS_GENERATED_SOURCES if custom_ops_aten_kernel_deps else CUSTOM_OPS_DUMMY_KERNEL_SOURCES
+    )
+
+    genrules = {}
+    libs = {}
+
+    # if aten_mode is true, we don't need functions_yaml_path
+    genrule_name = name + "_combined"
+    headers = _get_headers(genrule_name = genrule_name, custom_op = custom_ops_yaml_path)
+
+    # need to register ATen ops into Executorch runtime:
+    need_reg_aten_ops = aten_mode or functions_yaml_path
+
+    # need to register custom ops into Executorch runtime:
+    need_reg_custom_ops = custom_ops_yaml_path and custom_ops_requires_runtime_registration
+
+    need_reg_ops = need_reg_aten_ops or need_reg_custom_ops
+
+    if need_reg_aten_ops:
+        path = (
+            "$(location fbsource//xplat/caffe2:aten_src_path)/aten/src/ATen/native/native_functions.yaml"
+        ) if not functions_yaml_path else functions_yaml_path
+        genrule_cmd = genrule_cmd + [
+            "--functions_yaml_path={}".format(path),
+        ]
+    if aten_mode:
+        genrule_cmd = genrule_cmd + ["--use_aten_lib"]
+    if custom_ops_yaml_path:
+        genrule_cmd = genrule_cmd + [
+            "--custom_ops_yaml_path=" + custom_ops_yaml_path,
+        ]
+        genrule_outs += custom_ops_sources
+    genrules[genrule_name] = {
+        "cmd": genrule_cmd,
+        "outs": genrule_outs,
+    }
+
+    if need_reg_ops:
+        libs[name] = {
+            "genrule": genrule_name,
+            "headers": headers,
+            "srcs": GENERATED_SOURCES,
+        }
+
+    header_lib = name + "_headers"
+    libs[header_lib] = {
+        "headers": headers,
+    }
+    if custom_ops_yaml_path:
+        # lib for registering custom ops to pytorch
+        libs["custom_ops_" + name] = {
+            "genrule": genrule_name,
+            "headers": headers,
+            "srcs": custom_ops_sources,
+        }
+        if header_lib in libs:
+            libs[header_lib]["headers"].update(headers)
+        else:
+            libs[header_lib] = {
+                "headers": headers,
+            }
+    return genrules, libs
+
+def executorch_generated_lib(
+        name,
+        functions_yaml_target = None,
+        custom_ops_yaml_target = None,
+        fallback_yaml_target = None,
+        define_static_targets = False,
+        custom_ops_aten_kernel_deps = [],
+        custom_ops_requires_runtime_registration = True,
+        visibility = [],
+        aten_mode = False,
+        use_default_aten_ops_lib = True,
+        deps = [],
+        xplat_deps = [],
+        fbcode_deps = [],
+        platforms = get_default_executorch_platforms(),
+        compiler_flags = []):
+    """Emits 0-3 C++ library targets (in fbcode or xplat) containing code to
+    dispatch the operators specified in the provided yaml files.
+
+    Generates
+    * `<name>`: C++ library responsible for registering both ATen operators and
+      custom ops into the Executorch runtime.
+    * `custom_ops_<name>`: C++ library responsible for registering custom ops into
+      the PyTorch runtime.
+    Args:
+        name: The name of the C++ library target to emit. Also emits a
+            header-only C++ library target named `<name>_headers` that declares
+            the signatures for the C++ functions that map to the entries in
+            `functions.yaml` and `custom_ops.yaml`.
+            If `custom_ops_yaml_target` is specified, also emits:
+            - `custom_ops_<name>`: A host-only C++ library that declares and
+              registers the ops defined in that file. Clients can load this
+              library into local PyTorch using `torch.ops.load_library()` to
+              make them visible while authoring models.
+        functions_yaml_target: A Buck target pointing to the `functions.yaml`
+            file to use. Optional, but at least one of `functions_yaml_target`
+            and `custom_ops_yaml_target` must be specified. 
+        custom_ops_yaml_target: A Buck target pointing to the `custom_ops.yaml`
+            file to use. Optional, but at least one of `functions_yaml_target`
+            and `custom_ops_yaml_target` must be specified.
+        fallback_yaml_target: A Buck target pointing to the yaml file for fallback
+            purposes. We will merge `functions.yaml` with the fallback yaml if it exists.
+        define_static_targets: If True, defines extra "_static" targets
+            for each of the internal cxx_libraries defined by this macro, each
+            with preferred_linkage="static". If False, does not define these
+            targets.
+        custom_ops_aten_kernel_deps: kernels for custom ops that can be registered
+            into PyTorch runtime. They need to depend on ATen basic types such
+            as `at::Tensor` and `c10::ScalarType`. If not provided, fake kernels
+            will be auto-generated for custom ops.
+        custom_ops_requires_runtime_registration: If False, don't generate the
+            `<name>` target if `functions_yaml_target` is None. If True, always
+            generate the `<name>` target no matter whether we have `functions_yaml_target`.
+        aten_mode: a boolean for whether we should use ATen kernels and ATen tensors.
+        visibility: Visibility of the C++ library targets.
+        deps: Additional deps of the main C++ library. Needs to be in either the
+            `//executorch` or `//caffe2` module.
+        platforms: platforms args to runtime.cxx_library (only used when in xplat)
+        use_default_aten_ops_lib: If `aten_mode` is True AND this flag is True, use
+            `torch_mobile_all_ops` for the ATen operator library.
+        xplat_deps: Additional xplat deps, can be used to provide a custom operator library.
+        fbcode_deps: Additional fbcode deps, can be used to provide a custom operator library.
+        compiler_flags: compiler_flags args to runtime.cxx_library
+    """
+    if functions_yaml_target and aten_mode:
+        fail("{} is providing functions_yaml_target in ATen mode, it will be ignored. 
`native_functions.yaml` will be the source of truth.".format(name)) + + if not aten_mode and not functions_yaml_target and not custom_ops_yaml_target: + fail("At least one of functions_yaml_target, custom_ops_yaml_target needs to be provided") + + aten_suffix = "_aten" if aten_mode else "" + + # merge functions.yaml with fallback yaml + if functions_yaml_target: + merge_yaml_name = name + "_merge_yaml" + cmd = ("$(exe fbsource//xplat/executorch/codegen/tools:merge_yaml) " + + "--functions_yaml_path=$(location {}) ".format(functions_yaml_target) + + "--output_dir=$OUT ") + if fallback_yaml_target: + cmd = cmd + "--fallback_yaml_path=$(location {}) ".format(fallback_yaml_target) + runtime.genrule( + name = merge_yaml_name, + macros_only = False, + cmd = cmd, + outs = {"merged.yaml": ["merged.yaml"]}, + default_outs = ["."], + platforms = platforms, + ) + functions_yaml_path = "$(location :{}[merged.yaml])".format(merge_yaml_name) + else: + functions_yaml_path = None + if custom_ops_yaml_target: + custom_ops_yaml_path = "$(location {})".format(custom_ops_yaml_target) + else: + custom_ops_yaml_path = None + + genrules, libs = _prepare_genrule_and_lib( + name = name, + functions_yaml_path = functions_yaml_path, + custom_ops_yaml_path = custom_ops_yaml_path, + custom_ops_aten_kernel_deps = custom_ops_aten_kernel_deps, + custom_ops_requires_runtime_registration = custom_ops_requires_runtime_registration, + aten_mode = aten_mode, + ) + + # genrule for selective build from static operator list + oplist_dir_name = name + "_pt_oplist" + runtime.genrule( + name = oplist_dir_name, + macros_only = False, + cmd = ("$(exe fbsource//xplat/executorch/codegen/tools:gen_all_oplist) " + + "--model_file_list_path $(@query_outputs 'attrfilter(labels, pt_operator_library, deps(set({deps})))') " + + "--allow_include_all_overloads " + + "--output_dir $OUT ").format(deps = " ".join(["\"{}\"".format(d) for d in deps])), + outs = {"selected_operators.yaml": ["selected_operators.yaml"]}, + default_outs = ["."], + platforms = platforms, + ) + + # codegen genrule(s). For ATen mode we expect two genrules, one for ATen ops one for custom ops. + for genrule_name in genrules: + genrules[genrule_name]["cmd"].append( + "--op_selection_yaml_path=$(location :{}[selected_operators.yaml])".format(oplist_dir_name), + ) + runtime.genrule( + name = genrule_name, + cmd = " ".join(genrules[genrule_name]["cmd"]), + outs = {f: [f] for f in genrules[genrule_name]["outs"]}, + default_outs = ["."], + platforms = platforms, + ) + + # Exports headers that declare the function signatures of the C++ functions + # that map to entries in `functions.yaml` and `custom_ops.yaml`. + # For ATen mode, the headers will be `aten_Functions.h`, `aten_NativeFunctions.h` and `aten_UnboxingFunctions.h` + # along with headers declaring custom ops `Functions.h`, `NativeFunctions.h` and `UnboxingFunctions.h`. + header_lib = name + "_headers" + if header_lib in libs: + runtime.cxx_library( + name = header_lib, + srcs = [], + exported_headers = libs[header_lib]["headers"], + visibility = visibility, + # Relax visibility restrictions since deps may include targets + # outside of //executorch. 
+            _is_external_target = True,
+            platforms = platforms,
+            compiler_flags = compiler_flags,
+            exported_deps = [
+                "//executorch/kernels:kernel_runtime_context" + aten_suffix,
+            ],
+        )
+
+    xplat_deps = xplat_deps + (["//xplat/caffe2:torch_mobile_all_ops"] if aten_mode and use_default_aten_ops_lib else [])
+    fbcode_deps = fbcode_deps + (["//caffe2:libtorch"] if aten_mode and use_default_aten_ops_lib else [])
+    if name in libs:
+        lib_name = name
+        runtime.cxx_library(
+            name = lib_name,
+            srcs = [
+                ":{}[{}]".format(libs[lib_name]["genrule"], f)
+                for f in libs[lib_name]["srcs"]
+            ],
+            # Note that all of these generated headers are only used by this library
+            # target, and are not meant to be used by targets outside of this
+            # directory.
+            headers = libs[lib_name]["headers"],
+            exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [],
+            # link_whole is necessary because the operators register themselves via
+            # static initializers that run at program startup.
+            # @lint-ignore BUCKLINT link_whole
+            link_whole = True,
+            visibility = visibility,
+            # Operator Registration is done through static tables
+            compiler_flags = ["-Wno-global-constructors"] + compiler_flags,
+            deps = [
+                "//executorch/core:operator_registry",
+                "//executorch/core/prim_ops:prim_ops_registry" + aten_suffix,
+                "//executorch/core/values:executor_values" + aten_suffix,
+                "//executorch/profiler:profiler",
+            ] + deps,
+            exported_deps = [
+                "//executorch/core/kernel_types:kernel_types" + aten_suffix,
+                "//executorch/kernels:kernel_runtime_context" + aten_suffix,
+            ],
+            xplat_deps = xplat_deps,
+            fbcode_deps = fbcode_deps,
+            define_static_target = define_static_targets,
+            # Relax visibility restrictions since deps may include targets outside
+            # of //executorch.
+            _is_external_target = True,
+            platforms = platforms,
+        )
+
+    # If custom ops are provided, emit a host-only C++ library that declares and
+    # registers them. Clients can load this library into local PyTorch using
+    # `torch.ops.load_library()` to make them visible while authoring models.
+    #
+    # For the embedded runtime, clients should depend on the `<name>`
+    # cxx_library above, which will register the custom ops as long as
+    # custom_ops_requires_runtime_registration is True.
+    compiler_lib = "custom_ops_" + name if "custom_ops_" + name in libs else None
+    if compiler_lib:
+        # The library needs to be able to include <torch/library.h>.
+        if is_xplat():
+            torch_dep = ["//xplat/caffe2:torch"]
+        else:
+            torch_dep = ["//caffe2:libtorch"]
+
+        # TODO(T129125039): Rename this to make it clear that it's not part of
+        # the embedded runtime; it's only for registering custom ops with the
+        # PyTorch authoring runtime.
+        runtime.cxx_library(
+            name = compiler_lib,
+            srcs = [
+                ":{}[{}]".format(libs[compiler_lib]["genrule"], f)
+                for f in libs[compiler_lib]["srcs"]
+            ],
+            headers = {
+                "CustomOpsNativeFunctions.h": ":{}[CustomOpsNativeFunctions.h]".format(libs[compiler_lib]["genrule"]),
+            },
+            # link_whole is necessary because the operators register themselves
+            # via static initializers that run at program startup.
+            # @lint-ignore BUCKLINT link_whole
+            link_whole = True,
+            visibility = visibility,
+            deps = [
+                "//executorch/core/kernel_types:kernel_types_aten",
+                "//executorch/core:core",
+            ] + torch_dep + custom_ops_aten_kernel_deps,
+            exported_deps = [
+                "//executorch/kernels:kernel_runtime_context_aten",
+            ],
+            define_static_target = define_static_targets,
+            # Relax visibility restrictions since deps may include targets
+            # outside of //executorch. 
+ _is_external_target = True, + # Explicitly indicate that this C++ library will be loaded by Python + # and consequently need to be exposed as shared libraries. It's not + # required, but when set it'll make builds faster. + supports_python_dlopen = True, + platforms = platforms, + compiler_flags = compiler_flags, + ) diff --git a/codegen/targets.bzl b/codegen/targets.bzl new file mode 100644 index 00000000000..bc92b0ba985 --- /dev/null +++ b/codegen/targets.bzl @@ -0,0 +1,22 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + + See README.md for instructions on selective build. + """ + runtime.filegroup( + name = "templates", + srcs = native.glob([ + "templates/**/*.cpp", + "templates/**/*.ini", + "templates/**/*.h", + ]), + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/codegen/tools/targets.bzl b/codegen/tools/targets.bzl new file mode 100644 index 00000000000..68f5254253f --- /dev/null +++ b/codegen/tools/targets.bzl @@ -0,0 +1,70 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + + See README.md for instructions on selective build. + """ + runtime.python_binary( + name = "gen_oplist", + main_module = "executorch.codegen.tools.gen_oplist", + deps = [ + ":gen_oplist_lib", + ], + package_style = "inplace", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.python_library( + name = "yaml_util", + base_module = "executorch.codegen.tools", + srcs = ["yaml_util.py"], + ) + + runtime.python_library( + name = "merge_yaml_lib", + srcs = ["merge_yaml.py"], + base_module = "executorch.codegen.tools", + deps = [ + ":yaml_util", + ], + fbcode_deps = [ + "//caffe2/torchgen:torchgen", + ], + xplat_deps = [ + "//xplat/caffe2/torchgen:torchgen", + ], + ) + + runtime.python_binary( + name = "merge_yaml", + main_module = "executorch.codegen.tools.merge_yaml", + deps = [ + ":merge_yaml_lib", + ], + package_style = "inplace", + _is_external_target = True, + visibility = ["PUBLIC"], + ) + + runtime.python_test( + name = "test_gen_oplist", + base_module = "", + srcs = [ + "test/test_gen_oplist.py", + ], + deps = [ + ":gen_oplist_lib", + ], + package_style = "inplace", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/compiler/targets.bzl b/compiler/targets.bzl new file mode 100644 index 00000000000..44646ae6e3a --- /dev/null +++ b/compiler/targets.bzl @@ -0,0 +1,16 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. 
+ """ + + runtime.cxx_library( + name = "compiler", + exported_headers = [ + "Compiler.h", + ], + visibility = ["//executorch/..."], + ) diff --git a/configurations/targets.bzl b/configurations/targets.bzl new file mode 100644 index 00000000000..0abe9b2a551 --- /dev/null +++ b/configurations/targets.bzl @@ -0,0 +1,28 @@ +"""Client build configurations. + +This package contains useful build targets for executorch clients, assembling +common collections of deps into self-contained targets. +""" + +load("@fbsource//xplat/executorch/backends:backends.bzl", "get_all_cpu_backend_targets") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + # An extended executor library that includes all CPU backend targets and + # helper deps. + runtime.cxx_library( + name = "executor_cpu_optimized", + exported_deps = [ + "//executorch/threadpool:threadpool", + ] + get_all_cpu_backend_targets(), + visibility = [ + "//executorch/test/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/core/kernel_types/lean/targets.bzl b/core/kernel_types/lean/targets.bzl new file mode 100644 index 00000000000..359cfef89fd --- /dev/null +++ b/core/kernel_types/lean/targets.bzl @@ -0,0 +1,59 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + # Internal target for executor tensor. Clients should depend on + # :kernel_types below to be flexible with ATen Tensor and executor Tensor. + runtime.cxx_library( + name = "kernel_types", + srcs = ["tensor_impl.cpp"], + exported_headers = [ + "tensor_options.h", + "optional.h", + "scalar.h", + "tensor.h", + "tensor_impl.h", + "string_view.h", + "device.h", + ], + # Only should be depended on by kernel_types:kernel_types, but various suffixes like Android and Static + # mean I cant just expose visibility to a single rule. + visibility = [ + "//executorch/core/kernel_types/...", + ], + exported_deps = [ + ":scalar_type", + "//executorch/core:core", + "//executorch/core/kernel_types:tensor_shape_dynamism", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:dim_order_util", + "//executorch/core/values:executor_tag", + ], + ) + + # Set up a specific exported library for scalar_type to avoid circle dependency in ScalarTypeUtil.h + runtime.cxx_library( + name = "scalar_type", + srcs = [ + ], + exported_headers = [ + "bfloat16.h", + "complex.h", + "half.h", + "scalar_type.h", + "qint_types.h", + "bits_types.h", + ], + visibility = [ + "//executorch/core/kernel_types/util/...", + ], + exported_deps = [ + "//executorch/core:core", + "//executorch/core/values:executor_tag", + ], + ) diff --git a/core/kernel_types/lean/test/targets.bzl b/core/kernel_types/lean/test/targets.bzl new file mode 100644 index 00000000000..321c52bcce0 --- /dev/null +++ b/core/kernel_types/lean/test/targets.bzl @@ -0,0 +1,39 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. 
+ + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + runtime.cxx_test( + name = "optional_test", + srcs = ["optional_test.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + ], + ) + + runtime.cxx_test( + name = "executor_tensor_test", + srcs = ["executor_tensor_test.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + ], + ) + + runtime.cxx_test( + name = "scalar_test", + srcs = ["scalar_test.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + ], + ) + + runtime.cxx_test( + name = "tensor_impl_test", + srcs = ["tensor_impl_test.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + ], + ) diff --git a/core/kernel_types/targets.bzl b/core/kernel_types/targets.bzl new file mode 100644 index 00000000000..8593a49307e --- /dev/null +++ b/core/kernel_types/targets.bzl @@ -0,0 +1,35 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "tensor_shape_dynamism", + exported_headers = [ + "TensorShapeDynamism.h", + ], + visibility = [ + "//executorch/core/kernel_types/...", + ], + ) + + for aten_mode in (True, False): + aten_suffix = "_aten" if aten_mode else "" + + # Depend on this target if your types (Tensor, ArrayRef, etc) should be flexible between ATen and executor + runtime.cxx_library( + name = "kernel_types" + aten_suffix, + exported_headers = ["kernel_types.h"], + exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + # Visible because clients may want to build ATen-specific versions + # of their custom operators, to load into local PyTorch using + # `torch.ops.load_library()`. See codegen.bzl. + visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"], + exported_deps = [":tensor_shape_dynamism"] + ([] if aten_mode else ["//executorch/core/kernel_types/lean:kernel_types"]), + fbcode_exported_deps = ["//caffe2:torch-cpp"] if aten_mode else [], + xplat_exported_deps = ["//xplat/caffe2:torch_mobile_core"] if aten_mode else [], + ) diff --git a/core/kernel_types/test/targets.bzl b/core/kernel_types/test/targets.bzl new file mode 100644 index 00000000000..7cdabf0ae08 --- /dev/null +++ b/core/kernel_types/test/targets.bzl @@ -0,0 +1,16 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_test( + name = "tensor_shape_dynamism_test_aten", + srcs = ["TensorShapeDynamismAtenTest.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types_aten", + ], + ) diff --git a/core/kernel_types/testing/targets.bzl b/core/kernel_types/testing/targets.bzl new file mode 100644 index 00000000000..55fee755ac9 --- /dev/null +++ b/core/kernel_types/testing/targets.bzl @@ -0,0 +1,75 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. 
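+
+    The tensor_util library below is defined once per aten_mode, so the same
+    test helpers work against both the lean and the ATen tensor types.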
+ """ + for aten_mode in (True, False): + aten_suffix = ("_aten" if aten_mode else "") + + xplat_deps = ["//xplat/third-party/gmock:gmock"] + (["//xplat/caffe2:torch_mobile_all_ops"] if aten_mode else []) + fbcode_deps = ["fbsource//third-party/googletest:gtest", "fbsource//third-party/googletest:gmock"] + (["//caffe2:libtorch"] if aten_mode else []) + + runtime.cxx_library( + name = "tensor_util" + (aten_suffix), + srcs = ["TensorUtil.cpp"], + exported_headers = [ + "TensorUtil.h", + "TensorFactory.h", + ], + visibility = [ + # Be strict with the visibility so that operator implementations + # under //executorch/kernels/... can't depend on this test-only + # target. It's ok to add any //executorch/*/test/... path to this + # list. + "//executorch/core/kernel_types/util/test/...", + "//executorch/core/values/test/...", + "//executorch/core/prim_ops/test/...", + "//executorch/kernels/portable/test/...", + "//executorch/kernels/portable/cpu/util/test/...", + "//executorch/kernels/quantized/test/...", + "//executorch/kernels/optimized/test/...", + "//executorch/kernels/test/...", + "//executorch/core/test/...", + "//executorch/test/...", + "//executorch/util/...", + "//executorch/backends/qnnpack/test/...", + "@EXECUTORCH_CLIENTS", + ], + compiler_flags = ["-Wno-unneeded-internal-declaration"], + exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + exported_deps = [ + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + "//executorch/core/kernel_types/util:scalar_type_util" + aten_suffix, + "//executorch/core/kernel_types/util:tensor_util" + aten_suffix, + ], + fbcode_exported_deps = fbcode_deps, + xplat_exported_deps = xplat_deps, + ) + + runtime.cxx_test( + name = "tensor_util_test", + srcs = ["test/TensorUtilTest.cpp"], + deps = [ + ":tensor_util", + ], + ) + + runtime.cxx_test( + name = "tensor_factory_test", + srcs = ["test/TensorFactoryTest.cpp"], + deps = [ + ":tensor_util", + ], + ) + + runtime.cxx_test( + name = "tensor_factory_test_aten", + srcs = ["test/TensorFactoryTest.cpp"], + preprocessor_flags = ["-DUSE_ATEN_LIB"], + deps = [ + ":tensor_util_aten", + ], + ) diff --git a/core/kernel_types/util/targets.bzl b/core/kernel_types/util/targets.bzl new file mode 100644 index 00000000000..5aa371d5e98 --- /dev/null +++ b/core/kernel_types/util/targets.bzl @@ -0,0 +1,70 @@ +load("@fbsource//tools/build_defs:fbsource_utils.bzl", "is_xplat") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. 
+ """ + + # get deps for kernel_types + if is_xplat(): + aten_types_deps = ["//xplat/caffe2:torch_mobile_core"] + else: + aten_types_deps = ["//caffe2:torch-cpp"] + + for aten_mode in (True, False): + aten_suffix = "_aten" if aten_mode else "" + + runtime.cxx_library( + name = "scalar_type_util" + aten_suffix, + srcs = [], + exported_headers = [ + "ScalarTypeUtil.h", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + exported_deps = [ + "//executorch/core:core", + ] + aten_types_deps if aten_mode else ["//executorch/core/kernel_types/lean:scalar_type"], + ) + + runtime.cxx_library( + name = "dim_order_util" + aten_suffix, + srcs = [], + exported_headers = [ + "DimOrderUtils.h", + ], + exported_deps = [ + "//executorch/core:core", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + ) + + runtime.cxx_library( + name = "tensor_util" + aten_suffix, + srcs = ["aten_tensor_util.cpp"] if aten_mode else ["lean_tensor_util.cpp"], + exported_headers = [ + "tensor_util.h", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + exported_deps = [ + "//executorch/core:core", + ] + [ + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + ":scalar_type_util" + aten_suffix, + ":dim_order_util" + aten_suffix, + ], + ) diff --git a/core/kernel_types/util/test/targets.bzl b/core/kernel_types/util/test/targets.bzl new file mode 100644 index 00000000000..087f550a359 --- /dev/null +++ b/core/kernel_types/util/test/targets.bzl @@ -0,0 +1,46 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_test( + name = "scalar_type_util_test", + srcs = ["ScalarTypeUtilTest.cpp"], + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ) + + runtime.cxx_test( + name = "tensor_util_test", + srcs = ["TensorUtilTest.cpp"], + deps = [ + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ) + + runtime.cxx_test( + name = "operator_impl_example_test", + srcs = ["OperatorImplExampleTest.cpp"], + deps = [ + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ) + + runtime.cxx_test( + name = "dim_order_util_test", + srcs = ["DimOrderUtilsTest.cpp"], + deps = [ + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ) diff --git a/core/prim_ops/targets.bzl b/core/prim_ops/targets.bzl new file mode 100644 index 00000000000..f91b9f3e1c7 --- /dev/null +++ b/core/prim_ops/targets.bzl @@ -0,0 +1,44 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. 
+ """ + + for aten_mode in (True, False): + aten_suffix = ("_aten" if aten_mode else "") + + runtime.cxx_library( + name = "et_copy_index" + aten_suffix, + srcs = ["et_copy_index.cpp"], + visibility = [], # Private + exported_headers = ["et_copy_index.h"], + deps = [ + "//executorch/kernels:kernel_includes" + aten_suffix, + ], + exported_deps = [ + "//executorch/core/values:executor_values" + aten_suffix, + "//executorch/kernels:kernel_runtime_context" + aten_suffix, + ], + ) + + runtime.cxx_library( + name = "prim_ops_registry" + aten_suffix, + srcs = ["RegisterPrimOps.cpp"], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + # @lint-ignore BUCKLINT link_whole, need this to register prim ops. + link_whole = True, + # prim ops are registered through a global table so the ctor needs to be allowed + compiler_flags = ["-Wno-global-constructors"], + deps = [ + ":et_copy_index" + aten_suffix, + "//executorch/core/values:executor_values" + aten_suffix, + "//executorch/core:operator_registry", + "//executorch/kernels:kernel_includes" + aten_suffix, + ], + ) diff --git a/core/prim_ops/test/targets.bzl b/core/prim_ops/test/targets.bzl new file mode 100644 index 00000000000..2aa3471d801 --- /dev/null +++ b/core/prim_ops/test/targets.bzl @@ -0,0 +1,20 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + runtime.cxx_test( + name = "register_prim_ops_test", + srcs = [ + "RegisterPrimOpsTest.cpp", + ], + deps = [ + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/prim_ops:prim_ops_registry", + "//executorch/core:operator_registry", + "//executorch/kernels:kernel_runtime_context", + ], + ) diff --git a/core/targets.bzl b/core/targets.bzl new file mode 100644 index 00000000000..86c129e5217 --- /dev/null +++ b/core/targets.bzl @@ -0,0 +1,130 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "core", + srcs = [ + "Runtime.cpp", + "String.cpp", + ], + exported_headers = [ + "ArrayRef.h", + "Constants.h", + "Error.h", + "FunctionRef.h", + "Result.h", + "Runtime.h", + "String.h", + "macros.h", + "span.h", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + deps = [ + ":log", + "//executorch/profiler:profiler", + ], + exported_deps = [ + ":abort", + ":log", + "//executorch/compiler:compiler", + "//executorch/platform:platform", + # Must be exported to include the weak symbols it defines. 
+ "//executorch/platform:platform_private", + ], + ) + + runtime.cxx_library( + name = "abort", + srcs = [ + "Abort.cpp", + ], + exported_headers = [ + "Assert.h", + "Abort.h", + ], + deps = [ + "//executorch/compiler:compiler", + "//executorch/platform:platform_private", + ], + exported_deps = [ + ":log", + "//executorch/compiler:compiler", + "//executorch/platform:platform", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + reexport_all_header_dependencies = False, + ) + + runtime.cxx_library( + name = "log", + srcs = [ + "Log.cpp", + ], + exported_headers = [ + "Log.h", + ], + deps = [ + "//executorch/compiler:compiler", + "//executorch/platform:platform", + "//executorch/platform:platform_private", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.cxx_library( + name = "freeable_buffer", + exported_headers = ["FreeableBuffer.h"], + visibility = [ + "//executorch/backends/...", + "//executorch/core/test/...", + "//executorch/executor/...", + "@EXECUTORCH_CLIENTS", + ], + deps = [], + ) + + runtime.cxx_library( + name = "data_loader", + exported_headers = [ + "DataLoader.h", + ], + exported_deps = [ + "//executorch/compiler:compiler", + "//executorch/core:core", + ":freeable_buffer", + ], + visibility = [ + "//executorch/core/test/...", + "//executorch/executor/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.cxx_library( + name = "operator_registry", + srcs = ["OperatorRegistry.cpp"], + exported_headers = ["OperatorRegistry.h"], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + ":core", + "//executorch/core/values:executor_values", + ], + ) diff --git a/core/test/targets.bzl b/core/test/targets.bzl new file mode 100644 index 00000000000..a79e9b7bc7d --- /dev/null +++ b/core/test/targets.bzl @@ -0,0 +1,133 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_test( + name = "arrayref_test", + srcs = ["ArrayRefTest.cpp"], + deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_test( + name = "span_test", + srcs = ["span_test.cpp"], + deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_test( + name = "StringTest", + srcs = ["StringTest.cpp"], + deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_test( + name = "logging_test", + srcs = [ + "LoggingTest.cpp", + ], + deps = [ + "//executorch/core:core", + ], + compiler_flags = [ + # Turn on debug logging. 
+ "-DET_MIN_LOG_LEVEL=Debug", + ], + ) + + runtime.cxx_test( + name = "error_handling_test", + srcs = [ + "ErrorHandlingTest.cpp", + ], + deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_test( + name = "function_ref_test", + srcs = [ + "FunctionRefTest.cpp", + ], + deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_test( + name = "freeable_buffer_test", + srcs = [ + "FreeableBufferTest.cpp", + ], + deps = [ + "//executorch/core:freeable_buffer", + ], + ) + + runtime.cxx_test( + name = "operator_registry_test", + srcs = [ + "OperatorRegistryTest.cpp", + ], + deps = [ + "//executorch/core:operator_registry", + "//executorch/kernels:kernel_runtime_context", + ], + ) + + et_operator_library( + name = "executorch_all_ops", + include_all_operators = True, + define_static_targets = True, + ) + + executorch_generated_lib( + name = "test_generated_lib_1", + deps = [ + ":executorch_all_ops", + "//executorch/kernels/portable:operators", + ], + functions_yaml_target = "//executorch/kernels/portable:functions.yaml", + visibility = [ + "//executorch/...", + ], + ) + + runtime.export_file( + name = "functions.yaml", + ) + + executorch_generated_lib( + name = "specialized_kernel_generated_lib", + deps = [ + ":executorch_all_ops", + "//executorch/kernels/portable:operators", + ], + functions_yaml_target = ":functions.yaml", + visibility = [ + "//executorch/...", + ], + ) + + runtime.cxx_test( + name = "kernel_double_registration_test", + srcs = [ + "KernelDoubleRegistrationTest.cpp", + ], + deps = [ + "//executorch/core:operator_registry", + ":specialized_kernel_generated_lib", + ], + ) diff --git a/core/values/targets.bzl b/core/values/targets.bzl new file mode 100644 index 00000000000..a3b03fef230 --- /dev/null +++ b/core/values/targets.bzl @@ -0,0 +1,36 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + for aten_mode in (True, False): + aten_suffix = ("_aten" if aten_mode else "") + runtime.cxx_library( + name = "executor_values" + aten_suffix, + exported_headers = [ + "Evalue.h", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/core:core", + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + ":executor_tag", + ], + ) + + runtime.cxx_library( + name = "executor_tag", + exported_headers = [ + "Tag.h", + ], + visibility = [ + "//executorch/...", + ], + ) diff --git a/core/values/test/targets.bzl b/core/values/test/targets.bzl new file mode 100644 index 00000000000..58d65772590 --- /dev/null +++ b/core/values/test/targets.bzl @@ -0,0 +1,28 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. 
+ """ + + runtime.cxx_test( + name = "EvalueTest", + srcs = ["EvalueTest.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/values:executor_values", + ], + ) + + runtime.cxx_test( + name = "EvalueTest_aten", + srcs = ["EvalueTest.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types_aten", + "//executorch/core/kernel_types/testing:tensor_util_aten", + "//executorch/core/values:executor_values_aten", + ], + ) diff --git a/executor/targets.bzl b/executor/targets.bzl new file mode 100644 index 00000000000..5d73edab97b --- /dev/null +++ b/executor/targets.bzl @@ -0,0 +1,107 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def _program_preprocessor_flags(): + """Returns the preprocessor_flags to use when building Program.cpp""" + + # The code for flatbuffer verification can add ~30k of .text to the binary. + # It's a valuable feature, but make it optional for space-constrained + # systems. + enable_verification = native.read_config( + "executorch", + "enable_program_verification", + # Default value + "true", + ) + if enable_verification == "false": + return ["-DET_ENABLE_PROGRAM_VERIFICATION=0"] + elif enable_verification == "true": + # Enabled by default. + return [] + else: + fail("executorch.enable_program_verification must be one of 'true' or 'false'; saw '" + + enable_verification + "'") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "memory_manager", + exported_headers = [ + "HierarchicalAllocator.h", + "MemoryAllocator.h", + "MemoryManager.h", + ], + exported_deps = [ + "//executorch/core:core", + "//executorch/profiler:profiler", + ], + visibility = [ + "//executorch/...", + "//executorch/test/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.cxx_library( + name = "program", + srcs = ["Program.cpp"], + exported_headers = ["Program.h"], + deps = [ + "//executorch/compiler:compiler", + "//executorch/core:core", + "//executorch/schema:extended_header", + "//executorch/schema:schema", + "//executorch/profiler:profiler", + ], + preprocessor_flags = _program_preprocessor_flags(), + exported_deps = ["//executorch/core:data_loader", "//executorch/core:freeable_buffer"], + visibility = ["//executorch/executor/...", "@EXECUTORCH_CLIENTS"], + ) + + for aten_mode in (True, False): + aten_suffix = "_aten" if aten_mode else "" + + runtime.cxx_library( + name = "executor" + aten_suffix, + srcs = [ + "Executor.cpp", + "tensor_parser{}.cpp".format(aten_suffix), + ], + deps = [ + "//executorch/backends:backend", + "//executorch/core/prim_ops:prim_ops_registry" + aten_suffix, + "//executorch/kernels:kernel_runtime_context" + aten_suffix, + "//executorch/profiler:profiler", + "//executorch/schema:schema", + ], + exported_deps = [ + "//executorch/compiler:compiler", + "//executorch/core:core", + "//executorch/core:operator_registry", + "//executorch/core/kernel_types/util:tensor_util" + aten_suffix, + "//executorch/core/kernel_types/util:dim_order_util", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/values:executor_values", + "//executorch/executor:memory_manager", + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + ":program", + ], + exported_headers = [ + "Executor.h", + ], + headers = [ + "tensor_parser.h", + 
], + visibility = [ + "//executorch/backends/test/...", + "//executorch/executor/test/...", + "//executorch/pybindings/...", + "//executorch/test/...", + "//executorch/util/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/executor/test/targets.bzl b/executor/test/targets.bzl new file mode 100644 index 00000000000..3d9b65020ee --- /dev/null +++ b/executor/test/targets.bzl @@ -0,0 +1,170 @@ +load("@fbsource//tools/build_defs:fbsource_utils.bzl", "is_fbcode") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + for aten_mode in (True, False): + aten_suffix = ("_aten" if aten_mode else "") + runtime.cxx_library( + name = "test_backend_compiler_lib" + aten_suffix, + srcs = [ + "TestBackendCompilerLib.cpp", + ], + visibility = [ + "//executorch/backends/...", + "//executorch/pybindings/...", + "//executorch/test/...", + ], + # registration of backends is done through a static global + compiler_flags = ["-Wno-global-constructors"], + preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [], + exported_deps = [ + "//executorch/backends:backend" + aten_suffix, + ], + # TestBackendCompilerLib.cpp needs to compile with executor as whole + # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole) + link_whole = True, + ) + + runtime.cxx_test( + name = "executor_test", + srcs = [ + "ExecutorTest.cpp", + ], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/values:executor_values", + "//executorch/core:core", + "//executorch/core:operator_registry", + "//executorch/executor:executor", + "//executorch/kernels/portable:generated_lib", + "//executorch/kernels:kernel_runtime_context", + "//executorch/pytree:pytree", + "//executorch/test/utils:utils", + "//executorch/util:test_memory_config", + ], + ) + + runtime.cxx_library( + name = "managed_memory_manager", + srcs = [], + exported_headers = [ + "ManagedMemoryManager.h", + ], + visibility = [ + "//executorch/executor/test/...", + "//executorch/test/...", + "@EXECUTORCH_CLIENTS", + ], + deps = [ + "//executorch/executor:memory_manager", + ], + ) + + runtime.cxx_test( + name = "memory_allocator_test", + srcs = [ + "MemoryAllocatorTest.cpp", + ], + deps = [ + "//executorch/executor:memory_manager", + ], + ) + + # TODO(dbort): Find a way to make these run for ANDROID/APPLE in xplat. The + # android and ios test determinators don't like the reference to the model + # file in fbcode. See https://fburl.com/9esapdmd + if is_fbcode(): + modules_env = { + # The tests use this var to find the program file to load. This uses + # an fbcode target path because the authoring/export tools + # intentionally don't work in xplat (since they're host-only tools). 
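+            # ($(location <target>[<output>]) expands to the build-time
+            # filesystem path of that target's named output.)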
+ "ET_MODULE_ADD_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleAdd.ff])", + "ET_MODULE_MULTI_ENTRY_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleMultipleEntry.ff])", + } + + runtime.cxx_test( + name = "allocation_failure_stress_test", + srcs = [ + "AllocationFailureStressTest.cpp", + ], + deps = [ + ":managed_memory_manager", + "//executorch/executor:executor", + "//executorch/kernels/portable:generated_lib", + "//executorch/util:file_data_loader", + "//executorch/util:util", + ], + env = modules_env, + ) + + runtime.cxx_test( + name = "execution_plan_test", + srcs = [ + "ExecutionPlanTest.cpp", + ], + deps = [ + ":managed_memory_manager", + "//executorch/executor:executor", + "//executorch/util:util", + "//executorch/util:file_data_loader", + "//executorch/kernels/portable:generated_lib", + ], + env = modules_env, + ) + + runtime.cxx_test( + name = "program_test", + srcs = [ + "ProgramTest.cpp", + ], + deps = [ + "//executorch/executor:program", + "//executorch/util:embedded_data_loader", + "//executorch/util:file_data_loader", + ], + env = modules_env, + ) + + runtime.cxx_test( + name = "kernel_resolution_test", + srcs = [ + "KernelResolutionTest.cpp", + ], + deps = [ + ":managed_memory_manager", + "//executorch/executor:executor", + "//executorch/util:util", + "//executorch/util:file_data_loader", + ], + env = modules_env, + ) + + runtime.cxx_test( + name = "backend_integration_test", + srcs = [ + "BackendIntegrationTest.cpp", + ], + deps = [ + ":managed_memory_manager", + "//executorch/backends:backend", + "//executorch/executor:executor", + "//executorch/util:embedded_data_loader", + "//executorch/util:file_data_loader", + "//executorch/util:util", + ], + env = { + # The tests use these vars to find the program files to load. + # Uses an fbcode target path because the authoring/export tools + # intentionally don't work in xplat (since they're host-only + # tools). 
+ "ET_MODULE_ADD_MUL_NOSEGMENTS_DA1024_PATH": "$(location fbcode//executorch/test/models:exported_delegated_programs[ModuleAddMul-nosegments-da1024.ff])", + "ET_MODULE_ADD_MUL_NOSEGMENTS_PATH": "$(location fbcode//executorch/test/models:exported_delegated_programs[ModuleAddMul-nosegments.ff])", + "ET_MODULE_ADD_MUL_PATH": "$(location fbcode//executorch/test/models:exported_delegated_programs[ModuleAddMul.ff])", + }, + ) diff --git a/exir/targets.bzl b/exir/targets.bzl new file mode 100644 index 00000000000..fec2a078201 --- /dev/null +++ b/exir/targets.bzl @@ -0,0 +1,32 @@ +load("//bento:buck_macros.bzl", "bento_kernel") + +executorch_bento_kernels_base_deps = [ + "//executorch/exir:bento_deps", + "//pye/lib:eager_model_base", +] + +executorch_portable_kernel_lib = ["//executorch/pybindings:portable"] +executorch_aten_mode_lib = ["//executorch/pybindings:aten_mode_lib"] + +model_inventory_list = [ + { + "deps": [ + "//on_device_ai/helios/arvr/nn:nn", + "//on_device_ai/helios/compiler/utils:utils", + "//on_device_ai/fx/tracers:leaf_modules", + "//on_device_ai/helios/arch_params:arch_params", + "//on_device_ai/helios/fx/passes:passes", + "//on_device_ai/helios/pytorch/turing:turing_ops", + ], + "name": "executorch_helios", + }, +] + +def load_executorch_bento_kernels(): + for entry in model_inventory_list: + for suffix in ("", "_portable"): + ops_lib = executorch_portable_kernel_lib if suffix else executorch_aten_mode_lib + bento_kernel( + name = entry["name"] + suffix, + deps = executorch_bento_kernels_base_deps + entry["deps"] + ops_lib, + ) diff --git a/kernels/aten/targets.bzl b/kernels/aten/targets.bzl new file mode 100644 index 00000000000..8e2f1b04c48 --- /dev/null +++ b/kernels/aten/targets.bzl @@ -0,0 +1,52 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.export_file( + name = "functions.yaml", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + et_operator_library( + name = "executorch_aten_ops", + ops_schema_yaml_target = ":functions.yaml", + define_static_targets = True, + ) + + executorch_generated_lib( + name = "generated_lib", + aten_mode = True, + deps = [ + ":executorch_aten_ops", + ], + functions_yaml_target = None, + define_static_targets = True, + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + # TODO(T149415783): temporarily testing portable kernel "in aten mode" and remove after migration is done + executorch_generated_lib( + name = "generated_lib_aten", + aten_mode = True, + deps = [ + ":executorch_aten_ops", + ], + functions_yaml_target = None, + define_static_targets = True, + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/kernels/aten/test/targets.bzl b/kernels/aten/test/targets.bzl new file mode 100644 index 00000000000..092645aef9b --- /dev/null +++ b/kernels/aten/test/targets.bzl @@ -0,0 +1,9 @@ +load("@fbsource//xplat/executorch/kernels/test:util.bzl", "define_supported_features_lib") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. 
+ + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + define_supported_features_lib() diff --git a/kernels/optimized/cpu/targets.bzl b/kernels/optimized/cpu/targets.bzl new file mode 100644 index 00000000000..277f2a59889 --- /dev/null +++ b/kernels/optimized/cpu/targets.bzl @@ -0,0 +1,103 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "define_op_target", "op_target") + +_OPTIMIZED_ATEN_OPS = ( + op_target( + name = "op_add", + deps = [ + "//executorch/kernels/portable/cpu:scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + ], + ), + op_target( + name = "op_bmm", + deps = [ + "//executorch/kernels/optimized:libblas", + ], + ), + op_target( + name = "op_div", + deps = [ + "//executorch/kernels/portable/cpu:scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + ], + ), + op_target(name = "op_exp"), + op_target( + name = "op_gelu", + deps = select({ + "DEFAULT": [], + "ovr_config//runtime:fbcode-arm64": [ + "fbsource//third-party/sleef:sleef_arm", + ], + }), + ), + op_target( + name = "op_le", + deps = [ + "//executorch/kernels/portable/cpu:scalar_utils", + ], + ), + op_target( + name = "op_log_softmax", + deps = select({ + "DEFAULT": [], + "ovr_config//runtime:fbcode-arm64": [ + "fbsource//third-party/sleef:sleef_arm", + ], + }), + ), + op_target( + name = "op_mul", + deps = [ + "//executorch/kernels/portable/cpu:scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + ], + ), + op_target( + name = "op_native_layer_norm", + deps = [ + ":moments_utils", + ], + ), + op_target(name = "op_neg"), + op_target( + name = "op_sub", + deps = [ + "//executorch/kernels/portable/cpu:scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + ], + ), +) + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + # Define build targets for all operators registered in the tables above. + for op in _OPTIMIZED_ATEN_OPS: + define_op_target(**op) + + aten_op_targets = [":{}".format(op["name"]) for op in _OPTIMIZED_ATEN_OPS] + all_op_targets = aten_op_targets + + runtime.cxx_library( + name = "cpu_optimized", + srcs = [], + visibility = ["//executorch/kernels/..."], + exported_deps = all_op_targets, + ) + + runtime.cxx_library( + name = "moments_utils", + srcs = [], + exported_headers = ["moments_utils.h"], + visibility = ["//executorch/kernels/optimized/..."], + exported_deps = [ + "//executorch/kernels/optimized:libvec", + "//executorch/kernels/optimized:libutils", + ], + ) diff --git a/kernels/optimized/lib_defs.bzl b/kernels/optimized/lib_defs.bzl new file mode 100644 index 00000000000..f46e0f9a0c0 --- /dev/null +++ b/kernels/optimized/lib_defs.bzl @@ -0,0 +1,135 @@ +load("@fbsource//tools/build_defs:default_platform_defs.bzl", "DEVSERVER_PLATFORM_REGEX") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +# Because vec exists as a collection of header files, compile and preprocessor +# flags applied to the vec target do not have any effect, since no compilation +# actually occurs for the target. 
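+#
+# For example (an illustrative sketch, not a target defined in this file), a
+# consumer of the vec headers has to declare the flags on its own rule:
+#
+#     runtime.cxx_library(
+#         name = "my_vec_kernels",  # hypothetical target
+#         srcs = ["my_vec_kernels.cpp"],
+#         cxx_platform_preprocessor_flags = get_vec_cxx_preprocessor_flags(),
+#         fbandroid_platform_preprocessor_flags = get_vec_android_preprocessor_flags(),
+#         deps = ["//executorch/kernels/optimized:libvec"],
+#     )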
+# +# Targets using the vec library must therefore call the get_vec_*_flags +# functions in order to declare the required compiler flags needed in order to +# access CPU vector intrinsics. + +def get_vec_android_preprocessor_flags(): + preprocessor_flags = [ + ( + "^android-arm64.*$", + [ + "-DET_BUILD_ARM_VEC256_WITH_SLEEF", + ], + ), + ] + return preprocessor_flags + +def get_vec_cxx_preprocessor_flags(): + preprocessor_flags = [ + ( + DEVSERVER_PLATFORM_REGEX, + [ + "-DCPU_CAPABILITY_AVX2", + ], + ), + ] + return preprocessor_flags + +def get_vec_fbcode_preprocessor_flags(): + preprocessor_flags = [ + "-DCPU_CAPABILITY_AVX2", + ] + return preprocessor_flags + +# Currently, having a dependency on fbsource//third-party/sleef:sleef may cause +# duplicate symbol errors when linking fbcode targets in opt mode that also +# depend on ATen. This is because ATen accesses sleef via the third-party folder +# in caffe2 (caffe2/third-party//sleef:sleef). +# TODO(ssjia): Enable -DCPU_CAPABILITY_AVX2 in fbcode, which requires sleef. +def define_libs(): + runtime.cxx_library( + name = "libvec", + srcs = [], + exported_headers = native.glob([ + "vec/**/*.h", + ]), + header_namespace = "executorch/kernels/optimized", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + cxx_platform_deps = select({ + "DEFAULT": [ + ( + DEVSERVER_PLATFORM_REGEX, + [ + "fbsource//third-party/sleef:sleef", + ], + ), + ], + "ovr_config//cpu:arm64": [ + ( + DEVSERVER_PLATFORM_REGEX, + [ + "fbsource//third-party/sleef:sleef_arm", + ], + ), + ], + }), + fbandroid_platform_deps = [ + ( + "^android-arm64.*$", + [ + "fbsource//third-party/sleef:sleef_arm", + ], + ), + ], + ) + + runtime.cxx_library( + name = "libutils", + srcs = [], + exported_headers = native.glob([ + "utils/**/*.h", + ]), + header_namespace = "executorch/kernels/optimized", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + # Needed to access the __ET_INLINE macro + "//executorch/compiler:compiler", + ], + ) + + runtime.cxx_library( + name = "libblas", + srcs = native.glob([ + "blas/**/*.cpp", + ]), + exported_headers = native.glob([ + "blas/**/*.h", + ]), + header_namespace = "executorch/kernels/optimized", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + # TODO(ssjia): Link with Accelerate for Apple builds + fbandroid_platform_preprocessor_flags = [ + ( + "^android-arm64.*$", + [ + "-DET_BUILD_WITH_BLAS", + ], + ), + ], + fbandroid_platform_deps = [ + ( + "^android-arm64.*$", + [ + "fbsource//third-party/openblas:openblas", + ], + ), + ], + exported_deps = [ + "//executorch/kernels/optimized:libutils", + ], + ) diff --git a/kernels/optimized/op_registration_util.bzl b/kernels/optimized/op_registration_util.bzl new file mode 100644 index 00000000000..01c53304ca6 --- /dev/null +++ b/kernels/optimized/op_registration_util.bzl @@ -0,0 +1,115 @@ +load("@fbsource//tools/build_defs:selects.bzl", "selects") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load( + "@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl", + "get_vec_android_preprocessor_flags", +) + +def op_target(name, deps = []): + """Registers an optimized implementation for an operator overload group. + + An operator overload group is a set of operator overloads with a common + operator name. That common operator name should be the base name of this + target. 
+
+    E.g., the "add" operator overload group, named "op_add" in this target,
+    might implement:
+    - add.Tensor
+    - add_.Tensor
+    - add.out
+    - add.Scalar
+
+    If an op target would like to share a header/sources with a different op
+    target (e.g., helpers/utilities), it should declare a separate cxx_library
+    and add it as a dep.
+
+    Args:
+        name: The name of the operator overload group; e.g.,
+            "op_add". This directory must contain a source file named
+            "<name>.cpp"; e.g., "op_add.cpp".
+        deps: Optional extra deps to add to the cxx_library(). Note:
+            - op targets may not depend on other op targets, to keep the
+              dependencies manageable. If two op targets would like to share
+              code, define a separate runtime.cxx_library that they both depend
+              on.
+    """
+
+    # Note that this doesn't actually define the target, but helps register
+    # it in a table that's used to define the target.
+    return {
+        "deps": deps,
+        "name": name,
+    }
+
+def _enforce_deps(deps, name):
+    """Fails if any of the deps are not allowed.
+
+    Args:
+        deps: A list of build target strings.
+        name: The name of the target; e.g., "op_add"
+    """
+    for dep in deps:
+        if dep.startswith(":op_"):
+            # op targets may not depend on other op targets, to keep the
+            # dependencies manageable. If two op targets would like to share
+            # code, define a separate runtime.cxx_library that they both depend
+            # on.
+            fail("op_target {} may not depend on other op_target {}".format(
+                name,
+                dep,
+            ))
+
+def define_op_library(name, deps):
+    """Defines a cxx_library target for the named operator overload group.
+
+    Args:
+        name: The name of the target; e.g., "op_add"
+        deps: List of deps for the target.
+    """
+    selects.apply(obj = deps, function = native.partial(_enforce_deps, name = name))
+
+    augmented_deps = deps + [
+        "//executorch/kernels/optimized:libvec",
+        "//executorch/kernels/optimized:libutils",
+    ]
+
+    runtime.cxx_library(
+        name = "{}".format(name),
+        srcs = [
+            "{}.cpp".format(name),
+        ],
+        visibility = [
+            "//executorch/kernels/portable/test/...",
+            "//executorch/kernels/quantized/test/...",
+            "//executorch/kernels/optimized/test/...",
+            "//executorch/kernels/test/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        # Kernels often have helpers with no prototypes, so just disable the
+        # warning here; the headers are codegenned and linked in later.
+        compiler_flags = ["-Wno-missing-prototypes"],
+        deps = [
+            "//executorch/kernels:kernel_includes",
+        ] + augmented_deps,
+        fbandroid_platform_preprocessor_flags = get_vec_android_preprocessor_flags(),
+        # link_whole is necessary because the operators register themselves
+        # via static initializers that run at program startup.
+        # @lint-ignore BUCKLINT link_whole
+        link_whole = True,
+    )
+
+def define_op_target(name, deps):
+    """Possibly defines cxx_library targets for the named operator group.
+
+    Args:
+        name: The base name of the target; e.g., "op_add"
+        deps: List of deps for the targets.
+    """
+
+    # When building in ATen mode, ATen-compatible (non-custom) operators will
+    # use the implementations provided by ATen, so we should not build the
+    # versions defined here.
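+    # (As written, this helper defines the library unconditionally; contrast
+    # the portable op_registration_util.bzl, whose define_op_target() threads
+    # an is_aten_op flag through.)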
+    define_op_library(
+        name = name,
+        deps = deps,
+    )
diff --git a/kernels/optimized/targets.bzl b/kernels/optimized/targets.bzl
new file mode 100644
index 00000000000..65db7392252
--- /dev/null
+++ b/kernels/optimized/targets.bzl
@@ -0,0 +1,54 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib")
+load(":lib_defs.bzl", "define_libs")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    define_libs()
+
+    runtime.export_file(
+        name = "optimized.yaml",
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "optimized_operators",
+        srcs = [],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            "//executorch/kernels/optimized/cpu:cpu_optimized",
+        ],
+    )
+
+    et_operator_library(
+        name = "optimized_oplist",
+        ops_schema_yaml_target = ":optimized.yaml",
+    )
+
+    # Used mainly for operator testing. In practice, a generated lib specific
+    # to a project should be created that contains only the required operators
+    # for a particular model.
+    executorch_generated_lib(
+        name = "generated_lib",
+        deps = [
+            ":optimized_oplist",
+            ":optimized_operators",
+        ],
+        functions_yaml_target = ":optimized.yaml",
+        define_static_targets = True,
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
diff --git a/kernels/optimized/test/targets.bzl b/kernels/optimized/test/targets.bzl
new file mode 100644
index 00000000000..d2ee2880c60
--- /dev/null
+++ b/kernels/optimized/test/targets.bzl
@@ -0,0 +1,43 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+load(
+    "@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl",
+    "get_vec_android_preprocessor_flags",
+    "get_vec_cxx_preprocessor_flags",
+)
+load("@fbsource//xplat/executorch/kernels/test:util.bzl", "define_supported_features_lib")
+
+def _lib_test_bin(name, extra_deps = [], in_cpu = False):
+    """Defines a cxx_binary() for a single test file.
+    """
+    if not name.endswith("_test_bin"):
+        fail("'{}' must match the pattern '*_test_bin'".format(name))
+
+    src_root = name[:-len("_bin")]
+    lib_root = name[:-len("_test_bin")]
+
+    cpu_path = "/cpu" if in_cpu else ""
+
+    runtime.cxx_binary(
+        name = name,
+        srcs = [
+            "{}.cpp".format(src_root),
+        ],
+        deps = [
+            "//executorch/test/utils:utils",
+            "//executorch/kernels/optimized{}:{}".format(cpu_path, lib_root),
+        ] + extra_deps,
+        cxx_platform_preprocessor_flags = get_vec_cxx_preprocessor_flags(),
+        fbandroid_platform_preprocessor_flags = get_vec_android_preprocessor_flags(),
+    )
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
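+
+    The binaries below follow the *_test_bin naming convention enforced by
+    _lib_test_bin() above.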
+ """ + define_supported_features_lib() + + _lib_test_bin("libvec_test_bin") + _lib_test_bin("moments_utils_test_bin", in_cpu = True) + _lib_test_bin("libblas_test_bin") diff --git a/kernels/portable/cpu/pattern/targets.bzl b/kernels/portable/cpu/pattern/targets.bzl new file mode 100644 index 00000000000..15136cf4531 --- /dev/null +++ b/kernels/portable/cpu/pattern/targets.bzl @@ -0,0 +1,25 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "pattern", + srcs = [ + "unary_ufunc_realb_to_bool.cpp", + "unary_ufunc_realb_to_float.cpp", + ], + exported_headers = [ + "pattern.h", + ], + compiler_flags = ["-Wno-missing-prototypes"], + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + "//executorch/kernels:kernel_includes", + ], + visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/optimized/cpu/..."], + ) diff --git a/kernels/portable/cpu/targets.bzl b/kernels/portable/cpu/targets.bzl new file mode 100644 index 00000000000..fe19ad8e617 --- /dev/null +++ b/kernels/portable/cpu/targets.bzl @@ -0,0 +1,828 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "define_op_target", "op_target") + +# Operators that are listed in `functions.yaml`, and are thus compatible with +# the core ATen operators. Every entry here will be backed by a cxx_library +# target with the given name and deps. +# +# Note that a single target (or single .cpp file) can't mix ATen and non-ATen +# ops, and must be split. They can, however, share common code via a library dep +# if necessary. 
+_ATEN_OPS = ( + op_target( + name = "op_abs", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_acos", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_acosh", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_add", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_addmm", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + ":scalar_utils", + ":vec_ops", + ], + ), + op_target( + name = "op_amax", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_amin", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_any", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_arange", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_arange_start", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_argmax", + deps = [ + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_argmin", + deps = [ + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_as_strided_copy", + deps = [ + ":scalar_utils", + ], + ), + op_target( + name = "op_asin", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_asinh", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_atan", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_atanh", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_bitwise_and", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_bitwise_not", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_bitwise_or", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_bitwise_xor", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_pow", + 
deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_bmm", + deps = [ + ":vec_ops", + ], + ), + op_target( + name = "op_cat", + ), + op_target( + name = "op_clamp", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_clone", + ), + op_target( + name = "op_constant_pad_nd", + deps = [":scalar_utils"], + ), + op_target( + name = "op_convolution", + deps = [ + ":vec_ops", + ], + ), + op_target( + name = "op_copy", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_cos", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_cosh", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_cumsum", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_detach_copy", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_div", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_embedding", + ), + op_target( + name = "op_eq", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_erf", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_exp", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_expand", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:repeat_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_fill", + deps = [ + ":scalar_utils", + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_floor", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_floor_divide", + ), + op_target( + name = "op_fmod", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_full", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_full_like", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_ge", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_gelu", + deps = [ + ":math_constants", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_glu", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + 
"//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_gt", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_hardtanh", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_index", + deps = [ + "//executorch/kernels/portable/cpu/util:index_util", + ], + ), + op_target( + name = "op_index_put", + deps = [ + "//executorch/kernels/portable/cpu/util:index_util", + ], + ), + op_target( + name = "op_index_select", + ), + op_target( + name = "op_isinf", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_isnan", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_le", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_leaky_relu", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_lift_fresh_copy", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_log", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_log_softmax", + deps = [ + ":vec_ops", + "//executorch/kernels/portable/cpu/util:functional_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_logical_and", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_logical_not", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_logical_or", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_logical_xor", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_logit", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_lt", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_masked_fill", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_max", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_mean", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_min", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + 
"//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_minimum", + ), + op_target( + name = "op_mm", + deps = [ + ":vec_ops", + ], + ), + op_target( + name = "op_mul", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ":scalar_utils", + ], + ), + op_target( + name = "op_native_layer_norm", + deps = [ + ":vec_ops", + ], + ), + op_target( + name = "op_ne", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_neg", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_nonzero", + deps = [ + ":scalar_utils", + ], + ), + op_target( + name = "op_ones", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_permute", + ), + op_target( + name = "op_reciprocal", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_relu", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_remainder", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_repeat", + deps = [ + "//executorch/kernels/portable/cpu/util:repeat_util", + ], + ), + op_target( + name = "op_round", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_rsqrt", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_rsub_scalar", + deps = [ + ":scalar_utils", + ], + ), + op_target( + name = "op_scalar_tensor", + deps = [":scalar_utils"], + ), + op_target( + name = "op_scatter_add", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_select", + ), + op_target( + name = "op_select_scatter", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_sigmoid", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_sign", + deps = [ + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_sin", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_sinh", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_slice", + ), + op_target( + name = "op_slice_scatter", + ), + op_target( + name = "op_softmax", + deps = [ + ":vec_ops", + "//executorch/kernels/portable/cpu/util:functional_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_split", + ), + op_target( + name = "op_sqrt", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_squeeze", + ), + op_target( + name = "op_stack", + ), + op_target( + name = "op_sub", + deps = [ + ":scalar_utils", + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + ], + ), + op_target( + name = "op_sum", + deps = [ + 
"//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_t", + deps = ["//executorch/kernels/portable/cpu/util:transpose_util"], + ), + op_target( + name = "op_tan", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_tanh", + deps = [ + "//executorch/kernels/portable/cpu/pattern:pattern", + ], + ), + op_target( + name = "op_to", + ), + op_target( + name = "op_transpose", + deps = ["//executorch/kernels/portable/cpu/util:transpose_util"], + ), + op_target( + name = "op_tril", + deps = [ + "//executorch/core/kernel_types/util:tensor_util", + ], + ), + op_target( + name = "op_unbind", + ), + op_target( + name = "op_unsqueeze", + ), + op_target( + name = "op_var", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ), + op_target( + name = "op_view", + ), + op_target( + name = "op_where", + deps = [ + "//executorch/kernels/portable/cpu/util:broadcast_util", + "//executorch/kernels/portable/cpu/util:functional_util", + "//executorch/core/kernel_types:kernel_types", + ], + ), + op_target( + name = "op_zeros", + ), +) + +# Operators that are not listed in `functions.yaml` (i.e., operators listed in +# `custom_ops.yaml`), which are not compatible with the core ATen operators. +# Every entry here will be backed by a cxx_library target with the given name +# and deps, as well as a similar `_aten` target that uses at::Tensor and +# related types. +# +# Note that a single target (or single .cpp file) can't mix ATen and non-ATen +# ops, and must be split. They can, however, share common code via a library dep +# if necessary. +_CUSTOM_OPS = ( + op_target( + name = "op_allclose", + ), + op_target( + name = "op_linear_scratch_example", + ), +) + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + # Define build targets for all operators registered in the tables above. + for op in _ATEN_OPS: + define_op_target(is_aten_op = True, **op) + for op in _CUSTOM_OPS: + define_op_target(is_aten_op = False, **op) + + custom_op_targets = [":{}".format(op["name"]) for op in _CUSTOM_OPS] + + aten_op_targets = [":{}".format(op["name"]) for op in _ATEN_OPS] + all_op_targets = custom_op_targets + aten_op_targets + + runtime.cxx_library( + name = "cpu", + srcs = [], + visibility = [ + "//executorch/kernels/portable/...", + "//executorch/kernels/test/...", + ], + exported_deps = all_op_targets, + ) + + runtime.cxx_library( + name = "cpu_aten", + srcs = [], + visibility = ["//executorch/kernels/portable/..."], + exported_deps = [t + "_aten" for t in custom_op_targets], + ) + + # Only for use by op targets under //executorch. This API was inherited from + # Jarvis, and needs to be reevaluated before becoming a public API. + runtime.cxx_library( + name = "vec_ops", + srcs = [], + exported_headers = ["vec_ops.h"], + visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/quantized/..."], + ) + + # Only for use by targets in this directory. 
Defines constants like M_PI + # if they aren't already defined by the toolchain's cmath header + runtime.cxx_library( + name = "math_constants", + srcs = [], + exported_headers = [ + "math_constants.h", + ], + visibility = [ + "//executorch/kernels/portable/cpu/...", + ], + ) + + # Only for use by targets in this directory. + runtime.cxx_library( + name = "scalar_utils", + srcs = [], + exported_headers = ["scalar_utils.h"], + visibility = [ + "//executorch/kernels/portable/cpu/...", + "//executorch/kernels/optimized/cpu/...", + "@EXECUTORCH_CLIENTS", + ], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/util:scalar_type_util", + ], + ) diff --git a/kernels/portable/cpu/test/targets.bzl b/kernels/portable/cpu/test/targets.bzl new file mode 100644 index 00000000000..660909117c8 --- /dev/null +++ b/kernels/portable/cpu/test/targets.bzl @@ -0,0 +1,20 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + # + # NOTE: tests for operators should live in kernels/portable/test, so that + # they can be run against all implementations of a given operator. This + # directory is only for testing cpu-specific helper libraries. + # + + runtime.cxx_test( + name = "vec_ops_test", + srcs = ["vec_ops_test.cpp"], + deps = ["//executorch/kernels/portable/cpu:vec_ops"], + ) diff --git a/kernels/portable/cpu/util/targets.bzl b/kernels/portable/cpu/util/targets.bzl new file mode 100644 index 00000000000..8abdf4d656c --- /dev/null +++ b/kernels/portable/cpu/util/targets.bzl @@ -0,0 +1,89 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function.
+ """ + + runtime.cxx_library( + name = "repeat_util", + srcs = [ + "repeat_util.cpp", + ], + exported_headers = ["repeat_util.h"], + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + compiler_flags = ["-Wno-missing-prototypes"], + visibility = ["//executorch/kernels/portable/cpu/..."], + ) + + runtime.cxx_library( + name = "broadcast_util", + srcs = ["broadcast_util.cpp"], + exported_headers = [ + "broadcast_util.h", + ], + compiler_flags = ["-Wno-missing-prototypes"], + deps = [ + ":repeat_util", + "//executorch/kernels/portable/cpu:scalar_utils", + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/util:tensor_util", + ], + visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/optimized/cpu/..."], + ) + + runtime.cxx_library( + name = "transpose_util", + exported_headers = [ + "transpose_util.h", + ], + deps = [ + "//executorch/kernels/portable/cpu:scalar_utils", + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/util:tensor_util", + ], + visibility = ["//executorch/kernels/portable/cpu/..."], + ) + + # Utility functions that can be used by operators that perform indexing + runtime.cxx_library( + name = "index_util", + srcs = ["index_util.cpp"], + exported_headers = ["index_util.h"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/util:tensor_util", + ], + visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/quantized/..."], + ) + + # Utility functions that can be used by operators that repeat the same computation for each element in the tensor + # Note that because this is a header only library, targets must also depend on broadcast_util. 
+ runtime.cxx_library( + name = "functional_util", + srcs = [], + exported_headers = ["functional_util.h"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/util:tensor_util", + ":broadcast_util", + ], + visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/quantized/..."], + ) + + # Utility functions that can be used by operators that perform reduction + runtime.cxx_library( + name = "reduce_util", + srcs = ["reduce_util.cpp"], + exported_headers = ["reduce_util.h"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/util:tensor_util", + ], + visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/quantized/..."], + ) diff --git a/kernels/portable/cpu/util/test/targets.bzl b/kernels/portable/cpu/util/test/targets.bzl new file mode 100644 index 00000000000..349388cea66 --- /dev/null +++ b/kernels/portable/cpu/util/test/targets.bzl @@ -0,0 +1,23 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + runtime.cxx_test( + name = "broadcast_test", + srcs = ["broadcast_test.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/core/values:executor_values", + "//executorch/kernels/portable/cpu/util:broadcast_util", + ], + ) + + runtime.cxx_test( + name = "reduce_test", + srcs = ["reduce_test.cpp"], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/kernels/portable/cpu/util:reduce_util", + ], + ) diff --git a/kernels/portable/op_registration_util.bzl b/kernels/portable/op_registration_util.bzl new file mode 100644 index 00000000000..93695e0be13 --- /dev/null +++ b/kernels/portable/op_registration_util.bzl @@ -0,0 +1,165 @@ +load("@fbsource//tools/build_defs:selects.bzl", "selects") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def op_target(name, deps = [], android_deps = [], _allow_third_party_deps = False): + """Registers an implementation of an operator overload group. + + An operator overload group is a set of operator overloads with a common + operator name. That common operator name should be the base name of this + target. + + E.g., the "add" operator overload group, named "op_add" in this target, + might implement: + - add.Tensor + - add_.Tensor + - add.out + - add.Scalar + + If an op target would like to share a header/sources with a different op + target (e.g., helpers/utilities), it should declare a separate cxx_library + and add it as a dep. + + Args: + name: The name of the operator overload group; e.g., + "op_add". This directory must contain a source file named + "<name>.cpp"; e.g., "op_add.cpp". + deps: Optional extra deps to add to the cxx_library(). Note: + - op targets may not depend on other op targets, to keep the + dependencies manageable. If two op targets would like to share + code, define a separate runtime.cxx_library that they both depend + on. + - op targets may not depend on targets outside of `//executorch`. + This library is intended to be portable, open-sourceable, and + self-contained. + android_deps: Optional extra deps to add to fb_xplat_cxx_library() + under fbandroid_platform_deps when building for Android, which may + be outside of //executorch. Note that these will be ignored when + building for fbcode. + _allow_third_party_deps: If True, the op is allowed to depend on + third-party deps outside of //executorch. Should only be used by + targets under //executorch/kernels/optimized, which can benefit + from third-party optimization libraries. + """ + + # Note that this doesn't actually define the target, but helps register + # it in a table that's used to define the target. + return { + "android_deps": android_deps, + "deps": deps, + "name": name, + "_allow_third_party_deps": _allow_third_party_deps, + }
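+
+# A minimal, illustrative sketch of how an ops table is registered (the op
+# name and dep are hypothetical, not entries from this file):
+#
+#     _EXAMPLE_OPS = (
+#         op_target(
+#             name = "op_example",
+#             deps = ["//executorch/kernels/portable/cpu/util:functional_util"],
+#         ),
+#     )
+#
+#     for op in _EXAMPLE_OPS:
+#         define_op_target(is_aten_op = True, **op)
+#
+# This expects a sibling "op_example.cpp" and, via define_op_target() below,
+# produces the ":op_example" cxx_library (plus ":op_example_aten" when
+# is_aten_op is False).
+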
+def _enforce_deps(deps, name, allow_third_party_deps): + """Fails if any of the deps are not allowed. + + Args: + deps: A list of build target strings. + name: The name of the target with the provided deps; e.g., "op_add". + allow_third_party_deps: If True, allows external deps on third-party + targets. + """ + for dep in deps: + if dep.startswith(":op_"): + # op targets may not depend on other op targets, to keep the + # dependencies manageable. If two op targets would like to share + # code, define a separate runtime.cxx_library that they both depend + # on. + fail("op_target {} may not depend on other op_target {}".format( + name, + dep, + )) + if not (dep.startswith("//executorch") or dep.startswith(":")): + if allow_third_party_deps and ("/third-party/" in dep): + # Allowed exception. + pass + else: + # op targets may not depend on targets outside of + # `//executorch`. This library is intended to be portable, + # open-sourceable, and self-contained. + fail( + "op_target {} may not depend on code outside of //executorch: {}".format( + name, + dep, + ), + ) + +def define_op_library(name, deps, android_deps, aten_target, _allow_third_party_deps = False): + """Defines a cxx_library target for the named operator overload group. + + Args: + name: The name of the target; e.g., "op_add" + deps: List of deps for the target. + android_deps: List of fbandroid_platform_deps for the target. + aten_target: If True, define a "<name>_aten" target that uses + `:kernel_types_aten`, compatible with host PyTorch. If False, define + a "<name>" target that uses `:kernel_types`, compatible with the + embedded executorch runtime. + _allow_third_party_deps: If True, the op is allowed to depend on + third-party deps outside of //executorch. Should only be used by + targets under //executorch/kernels/optimized, which can benefit + from third-party optimization libraries. + """ + selects.apply(obj = deps, function = native.partial(_enforce_deps, name = name, allow_third_party_deps = _allow_third_party_deps)) + + aten_suffix = "_aten" if aten_target else "" + runtime.cxx_library( + name = name + aten_suffix, + srcs = [ + "{}.cpp".format(name), + ], + visibility = [ + "//executorch/kernels/portable/test/...", + "//executorch/kernels/quantized/test/...", + "//executorch/kernels/optimized/test/...", + "//executorch/kernels/test/...", + "@EXECUTORCH_CLIENTS", + ], + fbandroid_platform_deps = android_deps, + # Kernels often have helpers with no prototypes, so disable the warning + # here; the headers are code-generated and linked in later. + compiler_flags = ["-Wno-missing-prototypes"], + deps = [ + "//executorch/kernels:kernel_includes" + aten_suffix, + ] + deps, + # link_whole is necessary because the operators register themselves + # via static initializers that run at program startup.
+ # @lint-ignore BUCKLINT link_whole + link_whole = True, + ) + +def define_op_target(name, deps, android_deps, is_aten_op, _allow_third_party_deps = False): + """Possibly defines cxx_library targets for the named operator group. + + Args: + name: The base name of the target; e.g., "op_add" + deps: List of deps for the targets. + android_deps: List of fbandroid_platform_deps for the target. + is_aten_op: True if the operator overload group is ATen-compatible. + _allow_third_party_deps: If True, the op is allowed to depend on + third-party deps outside of //executorch. Should only be used by + targets under //executorch/kernels/optimized. + """ + + # If this is a custom op, define a target that builds it with at::Tensor + # so that it can be imported into a host PyTorch environment for authoring. + if not is_aten_op: + define_op_library( + name = name, + deps = deps, + android_deps = android_deps, + aten_target = True, + _allow_third_party_deps = _allow_third_party_deps, + ) + + # When building in ATen mode, ATen-compatible (non-custom) operators will + # use the implementations provided by ATen, so we should not build the + # versions defined here. + define_op_library( + name = name, + deps = deps, + android_deps = android_deps, + aten_target = False, + _allow_third_party_deps = _allow_third_party_deps, + ) diff --git a/kernels/portable/targets.bzl b/kernels/portable/targets.bzl new file mode 100644 index 00000000000..14b34d56483 --- /dev/null +++ b/kernels/portable/targets.bzl @@ -0,0 +1,119 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "operators", + srcs = [], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/kernels/portable/cpu:cpu", + ], + ) + + runtime.cxx_library( + name = "operators_aten", + srcs = [], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/kernels/portable/cpu:cpu_aten", + ], + ) + + runtime.export_file( + name = "functions.yaml", + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.export_file( + name = "custom_ops.yaml", + visibility = [ + "//executorch/codegen/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + et_operator_library( + name = "executorch_all_ops", + include_all_operators = True, + define_static_targets = True, + ) + + et_operator_library( + name = "executorch_aten_ops", + ops_schema_yaml_target = "//executorch/kernels/portable:functions.yaml", + define_static_targets = True, + ) + + et_operator_library( + name = "executorch_custom_ops", + ops_schema_yaml_target = "//executorch/kernels/portable:custom_ops.yaml", + define_static_targets = True, + ) + + generated_lib_common_args = { + "custom_ops_aten_kernel_deps": [ + "//executorch/kernels/portable:operators_aten", + ], + "custom_ops_yaml_target": "//executorch/kernels/portable:custom_ops.yaml", + # size_test expects _static targets to be available for these libraries. 
+ "define_static_targets": True, + "functions_yaml_target": "//executorch/kernels/portable:functions.yaml", + "visibility": [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + } + + executorch_generated_lib( + name = "generated_lib", + deps = [ + ":executorch_aten_ops", + ":executorch_custom_ops", + "//executorch/kernels/portable:operators", + ], + **generated_lib_common_args + ) + + executorch_generated_lib( + name = "generated_lib_all_ops", + deps = [ + ":executorch_all_ops", + "//executorch/kernels/portable:operators", + ], + **generated_lib_common_args + ) + + executorch_generated_lib( + name = "generated_lib_aten", + deps = [ + ":executorch_aten_ops", + ":executorch_custom_ops", + "//executorch/kernels/portable:operators_aten", + ], + custom_ops_aten_kernel_deps = [ + "//executorch/kernels/portable:operators_aten", + ], + custom_ops_yaml_target = "//executorch/kernels/portable:custom_ops.yaml", + aten_mode = True, + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + define_static_targets = True, + ) diff --git a/kernels/portable/test/targets.bzl b/kernels/portable/test/targets.bzl new file mode 100644 index 00000000000..e0021f193a1 --- /dev/null +++ b/kernels/portable/test/targets.bzl @@ -0,0 +1,12 @@ +load("@fbsource//xplat/executorch/kernels/test:util.bzl", "define_supported_features_lib", "op_test") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + define_supported_features_lib() + + op_test(name = "op_allclose_test", aten_compatible = False) + op_test(name = "op_mul_test") diff --git a/kernels/quantized/cpu/targets.bzl b/kernels/quantized/cpu/targets.bzl new file mode 100644 index 00000000000..de7bc633d9c --- /dev/null +++ b/kernels/quantized/cpu/targets.bzl @@ -0,0 +1,43 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "define_op_target", "op_target") + +_QUANT_OPS = ( + op_target( + name = "op_add", + ), + op_target( + name = "op_choose_qparams", + deps = [ + "//executorch/kernels/portable/cpu:vec_ops", + ], + ), + op_target( + name = "op_dequantize", + ), + op_target( + name = "op_embedding", + ), + op_target( + name = "op_quantize", + ), +) + +def define_common_targets(): + for op in _QUANT_OPS: + define_op_target(is_aten_op = False, **op) + + quant_op_targets = [":{}".format(op["name"]) for op in _QUANT_OPS] + + runtime.cxx_library( + name = "quantized_cpu", + srcs = [], + visibility = ["//executorch/kernels/quantized/..."], + exported_deps = quant_op_targets, + ) + + runtime.cxx_library( + name = "quantized_cpu_aten", + srcs = [], + visibility = ["//executorch/kernels/quantized/..."], + exported_deps = [t + "_aten" for t in quant_op_targets], + ) diff --git a/kernels/quantized/targets.bzl b/kernels/quantized/targets.bzl new file mode 100644 index 00000000000..1cb66a87783 --- /dev/null +++ b/kernels/quantized/targets.bzl @@ -0,0 +1,47 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib") + +def define_common_targets(): + runtime.export_file( + name = "quantized.yaml", + visibility = [ + "@EXECUTORCH_CLIENTS", + ], + ) + + et_operator_library( + name = "all_quantized_ops", + ops_schema_yaml_target = ":quantized.yaml", + 
define_static_targets = True, + ) + + for aten_mode in (True, False): + aten_suffix = "_aten" if aten_mode else "" + + runtime.cxx_library( + name = "quantized_operators" + aten_suffix, + srcs = [], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/kernels/quantized/cpu:quantized_cpu" + aten_suffix, + ], + ) + + executorch_generated_lib( + name = "generated_lib" + aten_suffix, + deps = [ + ":quantized_operators" + aten_suffix, + ":all_quantized_ops", + ], + custom_ops_yaml_target = ":quantized.yaml", + custom_ops_aten_kernel_deps = [":quantized_operators_aten"] if aten_mode else [], + aten_mode = aten_mode, + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + define_static_targets = True, + ) diff --git a/kernels/quantized/test/targets.bzl b/kernels/quantized/test/targets.bzl new file mode 100644 index 00000000000..e608aaf3388 --- /dev/null +++ b/kernels/quantized/test/targets.bzl @@ -0,0 +1,27 @@ +load("@fbsource//xplat/executorch/kernels/test:util.bzl", "define_supported_features_lib", "op_test") + +def define_common_targets(): + define_supported_features_lib() + + op_test("op_quantize_test", kernel_name = "quantized") + op_test("op_dequantize_test", kernel_name = "quantized") + op_test("op_choose_qparams_test", kernel_name = "quantized") + op_test("op_add_test", kernel_name = "quantized", deps = [ + "//executorch/kernels/quantized/cpu:op_dequantize", + "//executorch/kernels/quantized/cpu:op_quantize", + "//executorch/kernels/quantized/cpu:op_add", + "//executorch/kernels/quantized:generated_lib_headers", + "//executorch/kernels/portable:generated_lib_headers", + "//executorch/kernels/portable/cpu:op_add", + "//executorch/core/kernel_types/testing:tensor_util", + ]) + op_test("op_embedding_test", kernel_name = "quantized", deps = [ + "//executorch/kernels/quantized/cpu:op_dequantize", + "//executorch/kernels/quantized/cpu:op_quantize", + "//executorch/kernels/quantized/cpu:op_add", + "//executorch/kernels/quantized/cpu:op_embedding", + "//executorch/kernels/quantized:generated_lib_headers", + "//executorch/kernels/portable:generated_lib_headers", + "//executorch/kernels/portable/cpu:op_embedding", + "//executorch/core/kernel_types/testing:tensor_util", + ]) diff --git a/kernels/targets.bzl b/kernels/targets.bzl new file mode 100644 index 00000000000..f332c6ce452 --- /dev/null +++ b/kernels/targets.bzl @@ -0,0 +1,47 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. 
+ """ + + for aten_mode in (True, False): + aten_suffix = "_aten" if aten_mode else "" + + runtime.cxx_library( + name = "kernel_runtime_context" + aten_suffix, + exported_headers = [ + "kernel_runtime_context.h", + ], + visibility = [ + "//executorch/core/prim_ops/...", # Contains kernels + "//executorch/core/test/...", # Codegen tests + "//executorch/executor/...", + "//executorch/kernels/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/core:core", + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + ], + ) + + runtime.cxx_library( + name = "kernel_includes" + aten_suffix, + exported_headers = [ + "kernel_includes.h", + ], + visibility = [ + "//executorch/kernels/...", + "//executorch/core/prim_ops/...", # Prim kernels + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + ":kernel_runtime_context" + aten_suffix, + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + "//executorch/core/kernel_types/util:scalar_type_util" + aten_suffix, + "//executorch/core/kernel_types/util:tensor_util" + aten_suffix, + ], + ) diff --git a/kernels/test/custom_kernel_example/targets.bzl b/kernels/test/custom_kernel_example/targets.bzl new file mode 100644 index 00000000000..388d7c19731 --- /dev/null +++ b/kernels/test/custom_kernel_example/targets.bzl @@ -0,0 +1,50 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib") +load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "define_op_target", "op_target") + +MY_ATEN_COMPLIANT_OPS = ( + op_target( + name = "op_relu", + deps = [ + "//executorch/core/kernel_types/util:scalar_type_util", + "//executorch/core/kernel_types/util:tensor_util", + ], + ), +) + +def define_common_targets(): + for op in MY_ATEN_COMPLIANT_OPS: + define_op_target(is_aten_op = True, **op) + + all_op_targets = [":{}".format(op["name"]) for op in MY_ATEN_COMPLIANT_OPS] + + runtime.export_file( + name = "my_functions.yaml", + visibility = ["//executorch/..."], + ) + + runtime.cxx_library( + name = "my_operators", + srcs = [], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = all_op_targets, + ) + + et_operator_library( + name = "my_ops_list", + _is_external_target = True, + ops_schema_yaml_target = ":my_functions.yaml", + ) + + executorch_generated_lib( + name = "generated_lib", + deps = [ + ":my_ops_list", + ":my_operators", + ], + functions_yaml_target = ":my_functions.yaml", + define_static_targets = True, + ) diff --git a/kernels/test/custom_kernel_example/tests.bzl b/kernels/test/custom_kernel_example/tests.bzl new file mode 100644 index 00000000000..c9c73b0cda5 --- /dev/null +++ b/kernels/test/custom_kernel_example/tests.bzl @@ -0,0 +1,28 @@ +load("@fbsource//xplat/executorch/kernels/test:util.bzl", "define_supported_features_lib", "generated_op_test") +load(":targets.bzl", "MY_ATEN_COMPLIANT_OPS") + +def define_common_test_targets(): + # Step 1: Define the function header wrapper in executorch/kernels/test/targets.bzl, like + # `codegen_function_header_wrapper("executorch/kernels/test/custom_kernel_example", "custom_kernel_example")` + # or generally `codegen_function_header_wrapper("/", "")` + # This is needed because tests need to know our Functions.h target. + # TODO(T149423767): We should codegen this wrapper in #include, not let user define it. + + # Step 2: Use the helper to produce the supported feature list for tests. 
+ # Override some default features if they differ for this kernel. + # See executorch/kernels/test/supported_features.yaml and supported_features_def_example.yaml. + define_supported_features_lib() + + # Step 3: Use the helper generated_op_test to re-use existing tests + for op in MY_ATEN_COMPLIANT_OPS: + op_name = op["name"] + + generated_op_test( + name = op_name + "_test", + op_impl_target = ":my_operators", + generated_lib_headers_target = ":generated_lib_headers", + + # These two targets are defined in the previous steps. + supported_features_target = ":supported_features", + function_header_wrapper_target = "//executorch/kernels/test:function_header_wrapper_custom_kernel_example", + ) diff --git a/kernels/test/targets.bzl b/kernels/test/targets.bzl new file mode 100644 index 00000000000..ef27ab259c0 --- /dev/null +++ b/kernels/test/targets.bzl @@ -0,0 +1,276 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/kernels/test:util.bzl", "codegen_function_header_wrapper", "generated_op_test", "op_test") + +def _common_op_test(name, kernels, aten_compatible = True): + """ + Defines test targets in the format <kernel>_op_<op>_test. + For ATen kernel testing, the portable functions.yaml is used for the tested ops. + """ + for kernel in kernels: + deps = [":function_header_wrapper_{}".format(kernel)] + op_test(name, aten_compatible = aten_compatible, kernel_name = kernel, use_kernel_prefix = True, deps = deps) + +def make_example_generated_op_test_target(): + """ + Makes a test for the kernels/test/util generated_op_test() helper. + Here we use the portable kernel. Try it with `buck test xplat/executorch/kernels/test:op_<op>_test`. + """ + op_test_cpp_files = native.glob(["op_*_test.cpp"]) + + # The op name is the file name without the `_test.cpp` suffix ([:-9]). + op_to_test = [f[:-9] for f in op_test_cpp_files] + for op_name in op_to_test: + generated_op_test( + op_name + "_test", + "//executorch/kernels/portable/cpu:{}".format(op_name), + "//executorch/kernels/portable:generated_lib_headers", + "//executorch/kernels/portable/test:supported_features", + "//executorch/kernels/test:function_header_wrapper_portable", + ) + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function.
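+
+    As an example of the naming scheme used below,
+    _common_op_test("op_add_test", ["aten", "portable"]) defines the
+    targets "aten_op_add_test" and "portable_op_add_test".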
+ """ + + for aten_kernel in (True, False): + aten_suffix = "_aten" if aten_kernel else "" + runtime.cxx_library( + name = "test_util" + aten_suffix, + exported_headers = [ + "TestUtil.h", + ], + visibility = [ + "//executorch/kernels/...", + "@EXECUTORCH_CLIENTS", + ], + preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_kernel else [], + fbcode_exported_deps = [ + "//common/init:init", + "//common/gtest:gtest", + ], + xplat_exported_deps = [ + "//xplat/folly:init_init", + "//xplat/third-party/gmock:gtest", + ], + ) + + runtime.cxx_test( + name = "kernel_runtime_context_test" + aten_suffix, + srcs = ["kernel_runtime_context_test.cpp"], + deps = [ + "//executorch/kernels:kernel_runtime_context" + aten_suffix, + ], + ) + + runtime.python_binary( + name = "gen_supported_features", + main_module = "executorch.kernels.test.gen_supported_features", + deps = [ + "fbsource//third-party/pkg_resources:pkg_resources", + "fbsource//third-party/pypi/pyyaml:pyyaml", + ":gen_supported_features_lib", + ], + visibility = [ + "//executorch/kernels/...", + ], + ) + + runtime.python_library( + name = "gen_supported_features_lib", + srcs = ["gen_supported_features.py"], + resources = [ + "supported_features_header.ini", + "supported_features_definition.ini", + ], + base_module = "executorch.kernels.test", + visibility = ["//executorch/kernels/test/..."], + deps = [ + "fbsource//third-party/pypi/pyyaml:pyyaml", + ], + ) + + runtime.genrule( + name = "supported_feature_header_gen", + cmd = "$(exe //executorch/kernels/test:gen_supported_features) ${SRCS} > $OUT/supported_features.h", + srcs = ["supported_features.yaml"], + outs = {"supported_features.h": ["supported_features.h"]}, + default_outs = ["."], + ) + + runtime.cxx_library( + name = "supported_features_header", + srcs = [], + exported_headers = {"supported_features.h": ":supported_feature_header_gen[supported_features.h]"}, + visibility = [ + "//executorch/kernels/...", + ], + ) + + runtime.genrule( + name = "supported_feature_aten_gen", + cmd = "$(exe //executorch/kernels/test:gen_supported_features) ${SRCS} > $OUT/supported_features_aten.cpp", + srcs = ["supported_features_def_aten.yaml"], + outs = {"supported_features_aten.cpp": ["supported_features_aten.cpp"]}, + default_outs = ["."], + ) + + runtime.cxx_library( + name = "supported_features_aten", + srcs = [":supported_feature_aten_gen[supported_features_aten.cpp]"], + visibility = [ + "//executorch/kernels/...", + ], + exported_deps = [ + "//executorch/kernels/test:supported_features_header", + ], + ) + + TEST_SRCS = native.glob(["op_*_test.cpp"]) + + runtime.filegroup( + name = "test_srcs", + srcs = TEST_SRCS, + visibility = [ + "//executorch/kernels/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + runtime.genrule( + name = "test_srcs_gen", + srcs = [":test_srcs"], + cmd = "cp $(location :test_srcs)/* $OUT", + outs = {f: [f] for f in TEST_SRCS}, + default_outs = ["."], + visibility = [ + "//executorch/kernels/...", + "@EXECUTORCH_CLIENTS", + ], + ) + + codegen_function_header_wrapper("executorch/kernels/aten", "aten") + codegen_function_header_wrapper("executorch/kernels/portable", "portable") + codegen_function_header_wrapper("executorch/kernels/optimized", "optimized") + codegen_function_header_wrapper("executorch/kernels/quantized", "quantized") + codegen_function_header_wrapper("executorch/kernels/test/custom_kernel_example", "custom_kernel_example") + + _common_op_test("op_abs_test", ["aten", "portable"]) + _common_op_test("op_acos_test", ["aten", "portable"]) + _common_op_test("op_acosh_test", 
["aten", "portable"]) + _common_op_test("op_add_test", ["aten", "portable", "optimized"]) + _common_op_test("op_addmm_test", ["aten", "portable"]) + _common_op_test("op_amax_test", ["aten", "portable"]) + _common_op_test("op_amin_test", ["aten", "portable"]) + _common_op_test("op_any_test", ["aten", "portable"]) + _common_op_test("op_arange_start_test", ["aten", "portable"]) + _common_op_test("op_arange_test", ["aten", "portable"]) + _common_op_test("op_argmax_test", ["aten", "portable"]) + _common_op_test("op_argmin_test", ["aten", "portable"]) + _common_op_test("op_as_strided_copy_test", ["aten", "portable"]) + _common_op_test("op_asin_test", ["aten", "portable"]) + _common_op_test("op_asinh_test", ["aten", "portable"]) + _common_op_test("op_atan_test", ["aten", "portable"]) + _common_op_test("op_atanh_test", ["aten", "portable"]) + _common_op_test("op_bitwise_and_test", ["aten", "portable"]) + _common_op_test("op_bitwise_not_test", ["aten", "portable"]) + _common_op_test("op_bitwise_or_test", ["aten", "portable"]) + _common_op_test("op_bitwise_xor_test", ["aten", "portable"]) + _common_op_test("op_bmm_test", ["aten", "portable", "optimized"]) + _common_op_test("op_cat_test", ["aten", "portable"]) + _common_op_test("op_clamp_test", ["aten", "portable"]) + _common_op_test("op_clone_test", ["aten", "portable"]) + _common_op_test("op_constant_pad_nd_test", ["aten", "portable"]) + _common_op_test("op_convolution_test", ["aten", "portable"]) + _common_op_test("op_copy_test", ["aten", "portable"]) + _common_op_test("op_cos_test", ["aten", "portable"]) + _common_op_test("op_cosh_test", ["aten", "portable"]) + _common_op_test("op_cumsum_test", ["aten", "portable"]) + _common_op_test("op_detach_copy_test", ["aten", "portable"]) + _common_op_test("op_div_test", ["aten", "portable", "optimized"]) + _common_op_test("op_embedding_test", ["aten", "portable"]) + _common_op_test("op_eq_test", ["aten", "portable"]) + _common_op_test("op_erf_test", ["aten", "portable"]) + _common_op_test("op_exp_test", ["aten", "portable", "optimized"]) + _common_op_test("op_expand_test", ["aten", "portable"]) + _common_op_test("op_fill_test", ["aten", "portable"]) + _common_op_test("op_floor_divide_test", ["aten", "portable"]) + _common_op_test("op_floor_test", ["aten", "portable"]) + _common_op_test("op_fmod_test", ["aten", "portable"]) + _common_op_test("op_full_like_test", ["aten", "portable"]) + _common_op_test("op_full_test", ["aten", "portable"]) + _common_op_test("op_ge_test", ["aten", "portable"]) + _common_op_test("op_gelu_test", ["aten", "portable", "optimized"]) + _common_op_test("op_glu_test", ["aten", "portable"]) + _common_op_test("op_gt_test", ["aten", "portable"]) + _common_op_test("op_hardtanh_test", ["portable"]) + _common_op_test("op_index_put_test", ["aten", "portable"]) + _common_op_test("op_index_select_test", ["aten", "portable"]) + _common_op_test("op_index_test", ["aten", "portable"]) + _common_op_test("op_isinf_test", ["aten", "portable"]) + _common_op_test("op_isnan_test", ["aten", "portable"]) + _common_op_test("op_le_test", ["aten", "portable", "optimized"]) + _common_op_test("op_leaky_relu_test", ["portable"]) + _common_op_test("op_lift_fresh_copy_test", ["aten", "portable"]) + _common_op_test("op_log_softmax_test", ["aten", "portable", "optimized"]) + _common_op_test("op_log_test", ["aten", "portable"]) + _common_op_test("op_logical_and_test", ["aten", "portable"]) + _common_op_test("op_logical_not_test", ["aten", "portable"]) + _common_op_test("op_logical_or_test", ["aten", "portable"]) + 
_common_op_test("op_logical_xor_test", ["aten", "portable"]) + _common_op_test("op_logit_test", ["aten", "portable"]) + _common_op_test("op_lt_test", ["aten", "portable"]) + _common_op_test("op_masked_fill_test", ["aten", "portable"]) + _common_op_test("op_max_test", ["aten", "portable"]) + _common_op_test("op_mean_test", ["aten", "portable"]) + _common_op_test("op_min_test", ["aten", "portable"]) + _common_op_test("op_minimum_test", ["aten", "portable"]) + _common_op_test("op_mm_test", ["aten", "portable"]) + _common_op_test("op_mul_test", ["aten", "portable", "optimized"]) + _common_op_test("op_pow_test", ["aten", "portable"]) + _common_op_test("op_native_layer_norm_test", ["aten", "portable", "optimized"]) + _common_op_test("op_ne_test", ["aten", "portable"]) + _common_op_test("op_neg_test", ["aten", "portable", "optimized"]) + _common_op_test("op_nonzero_test", ["aten", "portable"]) + _common_op_test("op_ones_test", ["aten", "portable"]) + _common_op_test("op_permute_test", ["aten", "portable"]) + _common_op_test("op_reciprocal_test", ["aten", "portable"]) + _common_op_test("op_relu_test", ["aten", "portable"]) + _common_op_test("op_remainder_test", ["aten", "portable"]) + _common_op_test("op_repeat_test", ["aten", "portable"]) + _common_op_test("op_round_test", ["aten", "portable"]) + _common_op_test("op_rsqrt_test", ["aten", "portable"]) + _common_op_test("op_rsub_scalar_test", ["aten", "portable"]) + _common_op_test("op_scalar_tensor_test", ["aten", "portable"]) + _common_op_test("op_scatter_add_test", ["aten", "portable"]) + _common_op_test("op_select_scatter_test", ["aten", "portable"]) + _common_op_test("op_select_test", ["aten", "portable"]) + _common_op_test("op_sigmoid_test", ["aten", "portable"]) + _common_op_test("op_sign_test", ["aten", "portable"]) + _common_op_test("op_sin_test", ["aten", "portable"]) + _common_op_test("op_sinh_test", ["aten", "portable"]) + _common_op_test("op_slice_scatter_test", ["aten", "portable"]) + _common_op_test("op_slice_test", ["aten", "portable"]) + _common_op_test("op_softmax_test", ["aten", "portable"]) + _common_op_test("op_split_test", ["aten", "portable"]) + _common_op_test("op_sqrt_test", ["aten", "portable"]) + _common_op_test("op_squeeze_test", ["aten", "portable"]) + _common_op_test("op_stack_test", ["aten", "portable"]) + _common_op_test("op_sub_test", ["aten", "portable", "optimized"]) + _common_op_test("op_sum_test", ["aten", "portable"]) + _common_op_test("op_t_test", ["aten", "portable"]) + _common_op_test("op_tan_test", ["aten", "portable"]) + _common_op_test("op_tanh_test", ["aten", "portable"]) + _common_op_test("op_to_test", ["aten", "portable"]) + _common_op_test("op_transpose_test", ["aten", "portable"]) + _common_op_test("op_tril_test", ["aten", "portable"]) + _common_op_test("op_unbind_test", ["aten", "portable"]) + _common_op_test("op_unsqueeze_test", ["aten", "portable"]) + _common_op_test("op_var_test", ["aten", "portable"]) + _common_op_test("op_view_test", ["aten", "portable"]) + _common_op_test("op_where_test", ["aten", "portable"]) + _common_op_test("op_zeros_test", ["aten", "portable"]) + + make_example_generated_op_test_target() diff --git a/kernels/test/util.bzl b/kernels/test/util.bzl new file mode 100644 index 00000000000..68ca83906b5 --- /dev/null +++ b/kernels/test/util.bzl @@ -0,0 +1,141 @@ +load("@fbsource//tools/build_defs:fbsource_utils.bzl", "is_xplat") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def op_test(name, deps = [], aten_compatible = True, kernel_name = 
"portable", use_kernel_prefix = False): + """Defines a cxx_test() for an "op_*_test.cpp" file. + + Args: + name: "op__test"; e.g., "op_add_test". Must match + the non-extension part of the test source file (e.g., + "op_add_test.cpp"). This name must also agree with the target names + under //kernels//...; e.g., "op_add_test" will depend on + "//kernels/portable/cpu:op_add". + deps: Optional extra deps to add to the cxx_test(). + aten_compatible: If True, the operator under test is ATen-compatible + (i.e., appears in `functions.yaml`). + kernel_name: The name string as in //executorch/kernels/. + use_kernel_prefix: If True, the target name is + _op__test. Used by common kernel testing. + """ + if not (name.startswith("op_") and name.endswith("_test")): + fail("'{}' must match the pattern 'op_*_test'") + op_root = name[:-len("_test")] # E.g., "op_add" if name is "op_add_test". + + if kernel_name == "aten": + generated_lib_and_op_deps = [ + "//executorch/kernels/aten:generated_lib", + "//executorch/kernels/aten:generated_lib_headers", + "//executorch/kernels/test:supported_features_aten", + ] + else: + generated_lib_and_op_deps = [ + "//executorch/kernels/{}/cpu:{}".format(kernel_name, op_root), + "//executorch/kernels/{}:generated_lib_headers".format(kernel_name), + "//executorch/kernels/{}/test:supported_features".format(kernel_name), + ] + + name_prefix = "" + aten_suffix = "" + if kernel_name == "aten": + # For aten kernel, we need to use aten specific utils and types + name_prefix = "aten_" + aten_suffix = "_aten" + elif use_kernel_prefix: + name_prefix = kernel_name + "_" + runtime.cxx_test( + name = name_prefix + name, + srcs = [ + "{}.cpp".format(name), + ], + visibility = ["//executorch/kernels/..."], + deps = [ + "//executorch/core/kernel_types:kernel_types" + aten_suffix, + "//executorch/core/kernel_types/testing:tensor_util" + aten_suffix, + "//executorch/kernels/test:test_util" + aten_suffix, + ] + generated_lib_and_op_deps + deps, + ) + +def generated_op_test(name, op_impl_target, generated_lib_headers_target, supported_features_target, function_header_wrapper_target, deps = []): + """ + Build rule for testing an aten compliant op from an external kernel + (outside of executorch/) and re-use test cases here, so we can compare + between the external kernel and portable. + + Args: + name: "op__test"; e.g., "op_add_test". + mandatory dependency targets: + - op_impl_target (e.g. executorch/kernels/portable/cpu:op_add) + required for testing the kernel impl + - generated_lib_headers_target (e.g. executorch/kernels/portable:generated_lib_headers) + required for dispatching op to the specific kernel + - supported_features_target (e.g. executorch/kernels/portable/test:supported_features) + required so we know which features that kernel support, and bypass unsupported tests + - function_header_wrapper_target (e.g. executorch/kernels/portable/test:function_header_wrapper_portable) + required so we can include a header wrapper for Functions.h. Use codegen_function_header_wrapper() to generate. 
+def generated_op_test(name, op_impl_target, generated_lib_headers_target, supported_features_target, function_header_wrapper_target, deps = []): + """ + Build rule for testing an ATen-compliant op from an external kernel + (outside of executorch/), re-using the test cases here so we can compare + the external kernel against portable. + + Args: + name: "op_<op>_test"; e.g., "op_add_test". + mandatory dependency targets: + - op_impl_target (e.g. executorch/kernels/portable/cpu:op_add) + required for testing the kernel impl + - generated_lib_headers_target (e.g. executorch/kernels/portable:generated_lib_headers) + required for dispatching the op to the specific kernel + - supported_features_target (e.g. executorch/kernels/portable/test:supported_features) + required so we know which features that kernel supports, and can bypass unsupported tests + - function_header_wrapper_target (e.g. executorch/kernels/portable/test:function_header_wrapper_portable) + required so we can include a header wrapper for Functions.h. Use codegen_function_header_wrapper() to generate. + deps: additional deps + """ + runtime.cxx_test( + name = name, + srcs = [ + "fbsource//xplat/executorch/kernels/test:test_srcs_gen[{}.cpp]".format(name), + ] if is_xplat() else [ + "//executorch/kernels/test:test_srcs_gen[{}.cpp]".format(name), + ], + deps = [ + "//executorch/core/kernel_types:kernel_types", + "//executorch/core/kernel_types/testing:tensor_util", + "//executorch/kernels/test:test_util", + op_impl_target, + generated_lib_headers_target, + supported_features_target, + function_header_wrapper_target, + ] + deps, + ) + +def define_supported_features_lib(): + runtime.genrule( + name = "supported_feature_gen", + cmd = "$(exe //executorch/kernels/test:gen_supported_features) ${SRCS} > $OUT/supported_features.cpp", + srcs = ["supported_features_def.yaml"], + outs = {"supported_features.cpp": ["supported_features.cpp"]}, + default_outs = ["."], + ) + + runtime.cxx_library( + name = "supported_features", + srcs = [":supported_feature_gen[supported_features.cpp]"], + visibility = [ + "//executorch/kernels/...", + ], + exported_deps = [ + "//executorch/kernels/test:supported_features_header", + ], + ) + +def codegen_function_header_wrapper(kernel_path, kernel_name): + """Produces a file (FunctionHeaderWrapper.h) which simply includes the real + Functions.h for the specified kernel. + + Generate the wrapper for each kernel (except aten, where we can use portable). + Use the target "function_header_wrapper_<kernel_name>" in tests. + + For the ATen kernel, use portable, since we use its functions.yaml. + """ + header = "\"#include <{}/Functions.h>\"".format(kernel_path) + + runtime.genrule( + name = "gen_function_header_wrapper_{}".format(kernel_name), + cmd = "echo " + header + " > $OUT/FunctionHeaderWrapper.h", + outs = {"FunctionHeaderWrapper.h": ["FunctionHeaderWrapper.h"]}, + default_outs = ["."], + ) + + runtime.cxx_library( + name = "function_header_wrapper_{}".format(kernel_name), + exported_headers = { + "FunctionHeaderWrapper.h": ":gen_function_header_wrapper_{}[FunctionHeaderWrapper.h]".format(kernel_name), + }, + # TODO(T149423767): So far we have to expose this to users. Ideally this part can also be code-generated. + _is_external_target = True, + visibility = ["//executorch/...", "//pye/..."], + )
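+
+# For instance, the call codegen_function_header_wrapper("executorch/kernels/portable",
+# "portable") (made in kernels/test/targets.bzl above) generates a
+# FunctionHeaderWrapper.h whose entire content is:
+#
+#     #include <executorch/kernels/portable/Functions.h>
+#
+# so a test can include the wrapper and be retargeted at any kernel.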
+ """ + + runtime.cxx_library( + name = "platform_private", + srcs = _select_pal({ + "minimal": ["target/Minimal.cpp"], + "posix": ["target/Posix.cpp"], + }), + deps = [ + ":platform", + ], + visibility = [ + "//executorch/core/...", + ], + ) + + runtime.cxx_library( + name = "platform", + exported_headers = [ + "Platform.h", + "System.h", + "Types.h", + ], + exported_deps = [ + "//executorch/compiler:compiler", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/platform/test/targets.bzl b/platform/test/targets.bzl new file mode 100644 index 00000000000..2ae6623a09a --- /dev/null +++ b/platform/test/targets.bzl @@ -0,0 +1,60 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_test( + name = "platform_test", + srcs = [ + "ExecutorPalTest.cpp", + ], + deps = [ + "//executorch/core:core", + ], + ) + + runtime.cxx_test( + name = "platform_death_test", + srcs = [ + "ExecutorPalDeathTest.cpp", + ], + deps = [ + "//executorch/core:core", + ], + ) + + # This is an example of a target that provides a PAL implementation. Note + # the `link_whole = True` parameter, which is necessary to ensure that the + # symbols make their way into the top-level binary. If this target were to + # be added to a library instead of directly to a binary, it would need to be + # in that library's `exported_deps`. + runtime.cxx_library( + name = "stub_platform", + srcs = [ + "StubPlatform.cpp", + ], + exported_headers = [ + "StubPlatform.h", + ], + deps = [ + "//executorch/compiler:compiler", + "//executorch/platform:platform", + "//executorch/test/utils:utils", # gtest.h + ], + visibility = [], + ) + + runtime.cxx_test( + name = "platform_override_test", + srcs = [ + "ExecutorPalOverrideTest.cpp", + ], + deps = [ + "//executorch/core:core", + ":stub_platform", + ], + ) diff --git a/profiler/profiler.bzl b/profiler/profiler.bzl new file mode 100644 index 00000000000..25da7d141e9 --- /dev/null +++ b/profiler/profiler.bzl @@ -0,0 +1,22 @@ +""" +Common macros used by the profiler go into this file. 
+""" + +def profiling_enabled(): + return native.read_config("executorch", "prof_enabled", "false") == "true" + +def get_profiling_flags(): + profiling_flags = [] + if profiling_enabled(): + profiling_flags += ["-DPROFILING_ENABLED"] + prof_buf_size = native.read_config("executorch", "prof_buf_size", None) + if prof_buf_size != None: + if not profiling_enabled(): + fail("Cannot set profiling buffer size without enabling profiling first.") + profiling_flags += ["-DMAX_PROFILE_EVENTS={}".format(prof_buf_size), "-DMAX_MEM_PROFILE_EVENTS={}".format(prof_buf_size)] + num_prof_blocks = native.read_config("executorch", "num_prof_blocks", None) + if num_prof_blocks != None: + if not profiling_enabled(): + fail("Cannot configure number of profiling blocks without enabling profiling first.") + profiling_flags += ["-DMAX_PROFILE_BLOCKS={}".format(num_prof_blocks)] + return profiling_flags diff --git a/profiler/targets.bzl b/profiler/targets.bzl new file mode 100644 index 00000000000..d90b179c31f --- /dev/null +++ b/profiler/targets.bzl @@ -0,0 +1,37 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load("@fbsource//xplat/executorch/profiler:profiler.bzl", "get_profiling_flags") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "profiler", + srcs = [ + "profiler.cpp", + ] + select({ + "DEFAULT": ["linux_hooks.cpp"], + "ovr_config//cpu:xtensa": ["xtensa_executorch_hooks.cpp"], + }), + exported_preprocessor_flags = get_profiling_flags(), + exported_headers = [ + "profiler.h", + "hooks.h", + ], + deps = [ + "//executorch/core:abort", + "//executorch/core:log", + ], + visibility = [ + "//executorch/backends/...", + "//executorch/codegen/...", + "//executorch/core/...", + "//executorch/executor/...", + "//executorch/kernels/...", + "//executorch/test/...", + "@EXECUTORCH_CLIENTS", + ], + ) diff --git a/pybindings/targets.bzl b/pybindings/targets.bzl new file mode 100644 index 00000000000..0fe7d9f14cb --- /dev/null +++ b/pybindings/targets.bzl @@ -0,0 +1,89 @@ +load("@fbsource//xplat/executorch/backends:backends.bzl", "get_all_cpu_backend_targets") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +# Aten ops with portable kernel +MODELS_ATEN_OPS_LEAN_MODE_GENERATED_LIB = [ + "//executorch/kernels/portable:generated_lib", +] + +# Custom ops with portable kernel +MODELS_CUSTOM_OPS_LEAN_MODE_GENERATED_LIB = [ + "//executorch/kernels/quantized:generated_lib", + "//pye/model_inventory/asr_models/runtime:generated_custom_op_lib_lean", +] + +MODELS_ALL_OPS_LEAN_MODE_GENERATED_LIB = MODELS_ATEN_OPS_LEAN_MODE_GENERATED_LIB + MODELS_CUSTOM_OPS_LEAN_MODE_GENERATED_LIB + +MODULE_DEPS = [ + "//caffe2:ATen", + "//caffe2:torch", + "//caffe2:torch_extension", + "//executorch/core:operator_registry", + "//executorch/executor:executor", + "//executorch/schema:bundled_program_schema", + "//executorch/schema:schema", + "//executorch/util:aten_bridge", + "//executorch/util:bundled_program_verification", + "//executorch/util:embedded_data_loader", + "//executorch/util:mmap_data_loader", + "//executorch/util:test_memory_config", + "//executorch/util:util", + "//executorch/executor/test:test_backend_compiler_lib", +] + get_all_cpu_backend_targets() + +# Generated lib for all ATen ops with aten kernel used by models in model inventory 
+MODELS_ATEN_OPS_ATEN_MODE_GENERATED_LIB = [ + "//executorch/kernels/quantized:generated_lib_aten", + "//executorch/kernels/aten:generated_lib_aten", +] + +# Generated libs for all ATen ops AND custom ops used by models in //pye/model_inventory +MODELS_ALL_OPS_ATEN_MODE_GENERATED_LIB = MODELS_ATEN_OPS_ATEN_MODE_GENERATED_LIB + [ + "//caffe2/fb/custom_ops/turing:turing_lib_aten", + "//pye/model_inventory/asr_models/runtime:generated_lib_aten", + "//pye/model_inventory/asr_models/runtime:custom_ops_generated_lib_aten", + "//pye/model_inventory/fam_models/runtime:generated_lib_aten", + "//pye/model_inventory/ocr_detection_model_non_quantized/runtime:generated_lib_aten", + "//caffe2/fb/custom_ops/nimble/et_runtime:generated_lib_aten", + "//pye/model_inventory/keyboard_tracking_model/runtime:generated_lib_aten", +] + +def executorch_pybindings(python_module_name, srcs = [], cppdeps = [], visibility = ["//executorch/..."]): + runtime.cxx_python_extension( + name = python_module_name, + srcs = [ + "pybindings.cpp", + ] + srcs, + base_module = "executorch.pybindings", + preprocessor_flags = [ + "-DEXECUTORCH_PYTHON_MODULE_NAME={}".format(python_module_name), + ], + deps = [ + "//executorch/core:core", + "//executorch/schema:schema", + "//executorch/util:read_file", + ] + cppdeps, + external_deps = [ + "pybind11", + ], + xplat_deps = [ + "//arvr/third-party/pybind11:pybind11", + ], + use_static_deps = True, + _is_external_target = bool(visibility != ["//executorch/..."]), + visibility = visibility, + ) + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + executorch_pybindings( + srcs = [ + "module_stub.cpp", + ], + python_module_name = "operator", + ) diff --git a/pytree/targets.bzl b/pytree/targets.bzl new file mode 100644 index 00000000000..bf7f420821a --- /dev/null +++ b/pytree/targets.bzl @@ -0,0 +1,22 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + runtime.cxx_library( + name = "pytree", + srcs = [], + exported_headers = ["pytree.h"], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/compiler:compiler", + "//executorch/core:core", + ], + ) diff --git a/schema/targets.bzl b/schema/targets.bzl new file mode 100644 index 00000000000..65795aa05df --- /dev/null +++ b/schema/targets.bzl @@ -0,0 +1,144 @@ +load("@fbcode_macros//build_defs:export_files.bzl", "export_file") +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") + +# Construct the input and output file names. All input and output files rely on scalar_type file. 
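+# For example, PROGRAM_STEM "schema" below yields the flatbuffer input
+# "schema.fbs" and the generated header "schema_generated.h".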
+PROGRAM_STEM = "schema" +BUNDLED_STEM = "bundled_program_schema" +SCALAR_TYPE_STEM = "scalar_type" + +INPUT_PROGRAM = PROGRAM_STEM + ".fbs" +INPUT_BUNDLED = BUNDLED_STEM + ".fbs" +INPUT_SCALAR_TYPE = SCALAR_TYPE_STEM + ".fbs" + +OUTPUT_PROGRAM_HEADER = PROGRAM_STEM + "_generated.h" +OUTPUT_BUNDLED_HEADER = BUNDLED_STEM + "_generated.h" +OUTPUT_SCALAR_TYPE_HEADER = SCALAR_TYPE_STEM + "_generated.h" + +PROGRAM_GEN_RULE_NAME = "generate_program" +BUNDLED_GEN_RULE_NAME = "generate_bundled_program" + +PROGRAM_LIBRARY_NAME = PROGRAM_STEM +BUNDLED_LIBRARY_NAME = BUNDLED_STEM + +def _generate_schema_header(rule_name, srcs, headers, default_header): + """Generates a header file for the given flatbuffer schema. + """ + runtime.genrule( + name = rule_name, + srcs = srcs, + # We're only generating a single file, so it seems like we could use + # `out`, but `flatc` takes a directory as a parameter, not a single + # file. Use `outs` so that `${OUT}` is expanded as the containing + # directory instead of the file itself. + outs = {header: [header] for header in headers}, + default_outs = [default_header], + cmd = " ".join([ + "$(exe fbsource//third-party/flatbuffers:flatc)", + "--cpp", + "--cpp-std c++11", + "--gen-mutable", + "--scoped-enums", + "-o ${OUT}", + "${SRCS}", + # Let our infra know that the file was generated. + " ".join(["&& echo // @" + "generated >> ${OUT}/" + header for header in headers]), + ]), + visibility = [], # Private + ) + +def define_common_targets(): + """Defines targets that should be shared between fbcode and xplat. + + The directory containing this targets.bzl file should also contain both + TARGETS and BUCK files that call this function. + """ + + export_file( + name = INPUT_PROGRAM, + visibility = [ + "//executorch/exir/scripts/...", + "//executorch/exir/serialize/...", + ], + ) + export_file( + name = INPUT_BUNDLED, + visibility = [ + "//executorch/bundled_program/serialize/...", + ], + ) + export_file( + name = INPUT_SCALAR_TYPE, + visibility = [ + "//executorch/bundled_program/serialize/...", + "//executorch/exir/serialize/...", + "//executorch/sdk/etdump/...", + ], + ) + + _generate_schema_header( + PROGRAM_GEN_RULE_NAME, + [INPUT_PROGRAM, INPUT_SCALAR_TYPE], + [OUTPUT_PROGRAM_HEADER, OUTPUT_SCALAR_TYPE_HEADER], + OUTPUT_PROGRAM_HEADER, + ) + + _generate_schema_header( + BUNDLED_GEN_RULE_NAME, + [INPUT_BUNDLED, INPUT_SCALAR_TYPE], + [OUTPUT_BUNDLED_HEADER, OUTPUT_SCALAR_TYPE_HEADER], + OUTPUT_BUNDLED_HEADER, + ) + + # Header-only library target with the generated executorch program schema header. + runtime.cxx_library( + name = PROGRAM_LIBRARY_NAME, + srcs = [], + visibility = [ + # Lock this down as tightly as possible to ensure that flatbuffers + # are an implementation detail. Ideally this list would only include + # //executorch/executor/... + "//executorch/pybindings/...", + "//executorch/executor/...", + "//executorch/util/...", # bundled_program_verification + ], + exported_headers = { + OUTPUT_PROGRAM_HEADER: ":{}[{}]".format(PROGRAM_GEN_RULE_NAME, OUTPUT_PROGRAM_HEADER), + OUTPUT_SCALAR_TYPE_HEADER: ":{}[{}]".format(PROGRAM_GEN_RULE_NAME, OUTPUT_SCALAR_TYPE_HEADER), + }, + exported_deps = [ + "fbsource//third-party/flatbuffers:flatbuffers-api", + ], + ) + + # Header-only library target with the generated bundled program schema header.
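+    # Its exported headers reference the genrule outputs by name; e.g.,
+    # ":generate_bundled_program[bundled_program_schema_generated.h]".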
+    runtime.cxx_library(
+        name = BUNDLED_LIBRARY_NAME,
+        srcs = [],
+        visibility = [
+            "//executorch/bundled_program/...",
+            "//executorch/pybindings/...",
+            "//executorch/util/...",  # bundled_program_verification
+        ],
+        exported_headers = {
+            OUTPUT_BUNDLED_HEADER: ":{}[{}]".format(BUNDLED_GEN_RULE_NAME, OUTPUT_BUNDLED_HEADER),
+            OUTPUT_SCALAR_TYPE_HEADER: ":{}[{}]".format(PROGRAM_GEN_RULE_NAME, OUTPUT_SCALAR_TYPE_HEADER),
+        },
+        exported_deps = [
+            "fbsource//third-party/flatbuffers:flatbuffers-api",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "extended_header",
+        srcs = ["extended_header.cpp"],
+        exported_headers = [
+            "extended_header.h",
+        ],
+        visibility = [
+            "//executorch/executor/...",
+            "//executorch/schema/test/...",
+        ],
+        exported_deps = [
+            "//executorch/core:core",
+        ],
+    )
diff --git a/schema/test/targets.bzl b/schema/test/targets.bzl
new file mode 100644
index 00000000000..a8d4d35c043
--- /dev/null
+++ b/schema/test/targets.bzl
@@ -0,0 +1,18 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    runtime.cxx_test(
+        name = "extended_header_test",
+        srcs = [
+            "extended_header_test.cpp",
+        ],
+        deps = [
+            "//executorch/schema:extended_header",
+        ],
+    )
diff --git a/sdk/etdump/targets.bzl b/sdk/etdump/targets.bzl
new file mode 100644
index 00000000000..b93b5ceb586
--- /dev/null
+++ b/sdk/etdump/targets.bzl
@@ -0,0 +1,171 @@
+load("@fbcode_macros//build_defs:export_files.bzl", "export_file")
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+ETDUMP_STEM = "etdump_schema"
+ETDUMP_SCHEMA = ETDUMP_STEM + ".fbs"
+ETDUMP_GEN_RULE_NAME = "generate_etdump"
+ETDUMP_LIBRARY_NAME = ETDUMP_STEM
+
+SCALAR_TYPE_STEM = "scalar_type"
+SCALAR_TYPE = SCALAR_TYPE_STEM + ".fbs"
+
+# flatbuffers:flatc
+
+ETDUMP_SCHEMA_HEADER = ETDUMP_STEM + "_generated.h"
+OUTPUT_SCALAR_TYPE_HEADER = SCALAR_TYPE_STEM + "_generated.h"
+
+# flatcc
+ETDUMP_GEN_RULE_NAME_FLATCC = ETDUMP_GEN_RULE_NAME + "_flatcc"
+
+ETDUMP_SCHEMA_BUILDER = ETDUMP_STEM + "_builder.h"
+ETDUMP_SCHEMA_READER = ETDUMP_STEM + "_reader.h"
+ETDUMP_SCHEMA_VERIFIER = ETDUMP_STEM + "_verifier.h"
+
+SCALAR_TYPE_BUILDER = SCALAR_TYPE_STEM + "_builder.h"
+SCALAR_TYPE_READER = SCALAR_TYPE_STEM + "_reader.h"
+SCALAR_TYPE_VERIFIER = SCALAR_TYPE_STEM + "_verifier.h"
+
+FLATBUFFERS_COMMON_STEM = "flatbuffers_common"
+FLATBUFFERS_COMMON_BUILDER = FLATBUFFERS_COMMON_STEM + "_builder.h"
+FLATBUFFERS_COMMON_READER = FLATBUFFERS_COMMON_STEM + "_reader.h"
+
+def generate_schema_header(rule_name, srcs, headers, default_header):
+    """
+    Generate header files for ETDump schema
+    """
+    runtime.genrule(
+        name = rule_name,
+        srcs = srcs,
+        outs = {header: [header] for header in headers},
+        default_outs = [default_header],
+        cmd = " ".join([
+            "$(exe fbsource//third-party/flatbuffers:flatc)",
+            "--cpp",
+            "--cpp-std c++11",
+            "--gen-mutable",
+            "--scoped-enums",
+            "-o ${OUT}",
+            "${SRCS}",
+            # Let our infra know that the file was generated.
+            " ".join(["&& echo '// @''generated' >> ${OUT}/" + header for header in headers]),
+        ]),
+    )
+
+def generate_schema_header_flatcc(rule_name, srcs, headers, default_headers):
+    """
+    Generate header files for ETDump schema
+    """
+    runtime.genrule(
+        name = rule_name,
+        srcs = srcs,
+        outs = {header: [header] for header in headers},
+        default_outs = default_headers,
+        cmd = " ".join([
+            "$(exe fbsource//arvr/third-party/flatcc:flatcc-cli)",
+            "-cwr",
+            "-o ${OUT}",
+            "${SRCS}",
+            # Let our infra know that the file was generated.
+            " ".join(["&& echo '// @''generated' >> ${OUT}/" + header for header in headers]),
+        ]),
+    )
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+    export_file(
+        name = ETDUMP_SCHEMA,
+        visibility = ["//executorch/..."],
+    )
+
+    generate_schema_header(
+        ETDUMP_GEN_RULE_NAME,
+        [ETDUMP_SCHEMA, SCALAR_TYPE],
+        [ETDUMP_SCHEMA_HEADER, OUTPUT_SCALAR_TYPE_HEADER],
+        ETDUMP_SCHEMA_HEADER,
+    )
+
+    runtime.cxx_library(
+        name = ETDUMP_LIBRARY_NAME,
+        srcs = [],
+        visibility = ["//executorch/..."],
+        exported_headers = {
+            ETDUMP_SCHEMA_HEADER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME, ETDUMP_SCHEMA_HEADER),
+            OUTPUT_SCALAR_TYPE_HEADER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME, OUTPUT_SCALAR_TYPE_HEADER),
+        },
+        exported_deps = [
+            "fbsource//third-party/flatbuffers:flatbuffers-api",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "etdump",
+        srcs = ["etdump.cpp"],
+        exported_headers = ["etdump.h"],
+        deps = [
+            ":etdump_gen",
+            "//executorch/core:core",
+        ],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "etdump_gen",
+        srcs = ["etdump_gen.cpp"],
+        exported_headers = ["etdump_gen.h"],
+        deps = [],
+        exported_deps = [
+            ":etdump_schema",
+            "//executorch/core:abort",
+            "//executorch/executor:memory_manager",
+        ],
+        visibility = [
+            "//executorch/...",
+        ],
+    )
+
+    generate_schema_header_flatcc(
+        ETDUMP_GEN_RULE_NAME_FLATCC,
+        [ETDUMP_SCHEMA, SCALAR_TYPE],
+        [
+            ETDUMP_SCHEMA_BUILDER,
+            ETDUMP_SCHEMA_READER,
+            ETDUMP_SCHEMA_VERIFIER,
+            SCALAR_TYPE_BUILDER,
+            SCALAR_TYPE_READER,
+            SCALAR_TYPE_VERIFIER,
+            FLATBUFFERS_COMMON_BUILDER,
+            FLATBUFFERS_COMMON_READER,
+        ],
+        [
+            ETDUMP_SCHEMA_BUILDER,
+            ETDUMP_SCHEMA_READER,
+            ETDUMP_SCHEMA_VERIFIER,
+        ],
+    )
+
+    runtime.cxx_library(
+        name = ETDUMP_LIBRARY_NAME + "_flatcc",
+        srcs = [],
+        visibility = ["//executorch/..."],
+        exported_headers = {
+            ETDUMP_SCHEMA_BUILDER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, ETDUMP_SCHEMA_BUILDER),
+            ETDUMP_SCHEMA_READER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, ETDUMP_SCHEMA_READER),
+            ETDUMP_SCHEMA_VERIFIER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, ETDUMP_SCHEMA_VERIFIER),
+            SCALAR_TYPE_BUILDER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, SCALAR_TYPE_BUILDER),
+            SCALAR_TYPE_READER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, SCALAR_TYPE_READER),
+            SCALAR_TYPE_VERIFIER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, SCALAR_TYPE_VERIFIER),
+            FLATBUFFERS_COMMON_BUILDER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, FLATBUFFERS_COMMON_BUILDER),
+            FLATBUFFERS_COMMON_READER: ":{}[{}]".format(ETDUMP_GEN_RULE_NAME_FLATCC, FLATBUFFERS_COMMON_READER),
+        },
+        exported_deps = [
+            "fbsource//arvr/third-party/flatcc:flatcc",
+        ],
+    )
diff --git a/sdk/etdump/tests/targets.bzl b/sdk/etdump/tests/targets.bzl
new file mode 100644
index 00000000000..c554ca63470
--- /dev/null
+++ b/sdk/etdump/tests/targets.bzl
@@ -0,0 +1,21 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    runtime.cxx_test(
+        name = "etdump_gen_test",
+        srcs = [
+            "ETDumpGenTest.cpp",
+        ],
+        deps = [
+            "//executorch/executor:memory_manager",
+            "//executorch/profiler:profiler",
+            "//executorch/sdk/etdump:etdump_gen",
+        ],
+        preprocessor_flags = ["-DPROFILING_ENABLED"],
+    )
diff --git a/test/models/targets.bzl b/test/models/targets.bzl
new file mode 100644
index 00000000000..34a32b52197
--- /dev/null
+++ b/test/models/targets.bzl
@@ -0,0 +1,148 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """
+    Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+    runtime.python_library(
+        name = "linear_model",
+        srcs = ["linear_model.py"],
+        deps = [
+            "//caffe2:torch",
+        ],
+        visibility = [],  # Private
+    )
+
+    runtime.python_library(
+        name = "generate_linear_out_bundled_program_lib",
+        srcs = ["generate_linear_out_bundled_program.py"],
+        deps = [
+            ":linear_model",
+            "//caffe2:torch",
+            "//executorch/bundled_program:config",
+            "//executorch/bundled_program:core",
+            "//executorch/bundled_program/serialize:lib",
+            "//executorch/exir:lib",
+            "//executorch/exir/serialize:lib",
+        ],
+    )
+
+    runtime.python_binary(
+        name = "generate_linear_out_bundled_program",
+        main_module = "executorch.test.models.generate_linear_out_bundled_program",
+        deps = [
+            ":generate_linear_out_bundled_program_lib",
+        ],
+    )
+
+    runtime.python_library(
+        name = "export_program_lib",
+        srcs = ["export_program.py"],
+        deps = [
+            "//caffe2:torch",
+            "//executorch/test/end2end:exported_module",
+        ],
+        visibility = [],  # Private
+    )
+
+    runtime.python_binary(
+        name = "export_program",
+        main_module = "executorch.test.models.export_program",
+        deps = [
+            ":export_program_lib",
+        ],
+        visibility = [],  # Private
+    )
+
+    # Class names of nn.Modules for :exported_programs to export.
+    MODULES_TO_EXPORT = [
+        "ModuleAdd",
+        "ModuleBasic",
+        "ModuleLinear",
+        "ModuleMultipleEntry",
+    ]
+
+    # Generates Executorch .ff program files for various modules at build time.
+    # To use one, depend on a target like ":exported_programs[ModuleAdd.ff]".
+    runtime.genrule(
+        name = "exported_programs",
+        cmd = "$(exe :export_program) --modules " + ",".join(MODULES_TO_EXPORT) + " --outdir $OUT",
+        outs = {fname + ".ff": [fname + ".ff"] for fname in MODULES_TO_EXPORT},
+        default_outs = ["."],
+        visibility = [
+            "//executorch/...",
+            # This genrule can't run in xplat since it uses EXIR, so make its
+            # output visible to xplat tests. This is an exceptional case, and
+            # typically shouldn't be done.
+            "fbsource//xplat/executorch/...",
+            "fbsource//arvr/projects/nimble/common/ExecutorchModule/...",
+        ],
+        # Allow the xplat entry in the visibility list. This is an exceptional
+        # case, and typically shouldn't be done.
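+        # (executorch_pybindings in pybindings/targets.bzl derives this same
+        # flag from its visibility list.)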
+        _is_external_target = True,
+    )
+
+    runtime.python_library(
+        name = "export_delegated_program_lib",
+        srcs = ["export_delegated_program.py"],
+        deps = [
+            "//caffe2:torch",
+            "//executorch/backends:backend_api",
+            "//executorch/backends/test:backend_with_compiler_demo",
+            "//executorch/exir:lib",
+        ],
+        visibility = [],  # Private
+    )
+
+    runtime.python_binary(
+        name = "export_delegated_program",
+        main_module = "executorch.test.models.export_delegated_program",
+        # Use the https://www.internalfb.com/intern/wiki/XAR/ format so that
+        # python files in the archive have predictable names/paths even in opt
+        # mode. Without this `par_style` override, torch dynamo fails to skip
+        # the tracing of files under the `caffe2/torch/_dynamo` directory; the
+        # skips are based on the paths in the `__file__` strings at runtime, but
+        # normal PAR mangles them in an incompatible way in opt mode. See
+        # T151983912 for more background.
+        par_style = "xar",
+        deps = [
+            ":export_delegated_program_lib",
+        ],
+        visibility = [],  # Private
+    )
+
+    # Class names of nn.Modules for :exported_delegated_programs to export.
+    DELEGATED_MODULES_TO_EXPORT = [
+        "ModuleAddMul",
+    ]
+
+    # Name of the backend to use when exporting delegated programs.
+    BACKEND_ID = "StubBackend"
+
+    # Generates Executorch .ff program files for various modules at build time.
+    # To use one, depend on a target like
+    # ":exported_delegated_programs[ModuleAddMul.ff]" or
+    # ":exported_delegated_programs[ModuleAddMul-nosegments.ff]" (which does not
+    # extract the delegate data blobs into segments). A "-da1024" suffix, e.g.
+    # ":exported_delegated_programs[ModuleAddMul-nosegments-da1024.ff]", selects
+    # 1024-byte delegate alignment.
+    runtime.genrule(
+        name = "exported_delegated_programs",
+        cmd = "$(exe :export_delegated_program)" +
+              " --modules " + ",".join(DELEGATED_MODULES_TO_EXPORT) +
+              " --backend_id " + BACKEND_ID +
+              " --outdir $OUT",
+        outs = {
+            fname + seg_suffix + da_suffix + ".ff": [fname + seg_suffix + da_suffix + ".ff"]
+            for fname in DELEGATED_MODULES_TO_EXPORT
+            for seg_suffix in ["", "-nosegments"]
+            # "da" = delegate alignment
+            for da_suffix in ["", "-da1024"]
+        },
+        default_outs = ["."],
+        visibility = [
+            "//executorch/executor/test/...",
+            "//executorch/test/...",
+        ],
+    )
diff --git a/test/targets.bzl b/test/targets.bzl
new file mode 100644
index 00000000000..00ad45828c7
--- /dev/null
+++ b/test/targets.bzl
@@ -0,0 +1,129 @@
+load(
+    "@fbsource//tools/build_defs:default_platform_defs.bzl",
+    "ANDROID",
+    "CXX",
+)
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+load("@fbsource//xplat/executorch/pybindings:targets.bzl", "MODELS_ALL_OPS_ATEN_MODE_GENERATED_LIB", "MODELS_ALL_OPS_LEAN_MODE_GENERATED_LIB")
+
+SIZE_TEST_SOURCES = [
+    "size_test.cpp",
+]
+
+SIZE_TEST_DEPS = [
+    "//executorch/executor:executor",
+    "//executorch/util:file_data_loader",
+    "//executorch/util:util",
+]
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    # Test driver for models, uses all portable kernels.
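+    # The loop below emits two variants: "executor_runner" (lean mode, linked
+    # against the CPU-optimized configuration and lean-mode generated libs) and
+    # "executor_runner_aten" (built with -DUSE_ATEN_LIB against the ATen-mode
+    # generated libs).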
+    for aten_mode in (True, False):
+        aten_suffix = ("_aten" if aten_mode else "")
+        runtime.cxx_binary(
+            name = "executor_runner" + aten_suffix,
+            srcs = ["executor_runner.cpp"],
+            deps = [
+                "//executorch/executor/test:test_backend_compiler_lib" + aten_suffix,
+                "//executorch/executor:executor" + aten_suffix,
+                "//executorch/sdk/etdump:etdump",
+                "//executorch/util:bundled_program_verification" + aten_suffix,
+                "//executorch/util:embedded_data_loader",
+                "//executorch/util:file_data_loader",
+                "//executorch/util:util" + aten_suffix,
+            ] + (MODELS_ALL_OPS_ATEN_MODE_GENERATED_LIB if aten_mode else [
+                "//executorch/configurations:executor_cpu_optimized",
+            ] + MODELS_ALL_OPS_LEAN_MODE_GENERATED_LIB),
+            preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [],
+            external_deps = [
+                "gflags",
+            ],
+            platforms = [ANDROID, CXX],
+            xplat_deps = [
+                "//xplat/third-party/gflags:gflags",
+            ],
+            define_static_target = not aten_mode,
+            visibility = [
+                "@EXECUTORCH_CLIENTS",
+            ],
+        )
+
+    # DO NOT MODIFY: This target `size_test_static` is being used on a per-diff
+    # CI job to guard Executorch binary size. It doesn't contain any operators
+    # or kernels, and thus shouldn't be used to run a model. Adding/removing
+    # dependencies will likely result in inaccurate size measurements.
+    #
+    # It's also best to build this with `-c executorch.enable_program_verification=false`
+    # to remove ~30kB of optional flatbuffer verification code from the binary.
+    runtime.cxx_binary(
+        name = "size_test",
+        srcs = SIZE_TEST_SOURCES,
+        deps = SIZE_TEST_DEPS,
+        define_static_target = True,
+    )
+
+    runtime.cxx_binary(
+        name = "size_test_all_ops",
+        srcs = SIZE_TEST_SOURCES,
+        deps = SIZE_TEST_DEPS + [
+            "//executorch/kernels/portable:generated_lib_all_ops",
+        ],
+        define_static_target = True,
+    )
+
+    runtime.export_file(
+        name = "size_test.cpp",
+        visibility = [
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
+
+    # Test binary that can create multiple Executor instances in the same
+    # process.
+    runtime.cxx_binary(
+        name = "multi_runner",
+        srcs = ["multi_runner.cpp"],
+        deps = [
+            "//executorch/core:core",
+            "//executorch/kernels/portable:generated_lib_all_ops",
+            "//executorch/executor:executor",
+            "//executorch/executor/test:managed_memory_manager",
+            "//executorch/util:embedded_data_loader",
+            "//executorch/util:read_file",
+            "//executorch/util:util",
+        ],
+        external_deps = [
+            "gflags",
+        ],
+        xplat_deps = [
+            "//xplat/third-party/gflags:gflags",
+        ],
+    )
+
+    # Test binary that can create relocatable Executor instances.
+    runtime.cxx_binary(
+        name = "relocatable_runner",
+        srcs = ["relocatable_runner.cpp"],
+        deps = [
+            "//executorch/kernels/portable:generated_lib_all_ops",
+            "//executorch/executor:executor",
+            "//executorch/configurations:executor_cpu_optimized",
+            "//executorch/util:embedded_data_loader",
+            "//executorch/util:read_file",
+            "//executorch/util:util",
+        ],
+        external_deps = [
+            "gflags",
+        ],
+        preprocessor_flags = [],
+        define_static_target = True,
+        xplat_deps = [
+            "//xplat/third-party/gflags:gflags",
+        ],
+    )
diff --git a/test/utils/targets.bzl b/test/utils/targets.bzl
new file mode 100644
index 00000000000..a019da2df0b
--- /dev/null
+++ b/test/utils/targets.bzl
@@ -0,0 +1,45 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    runtime.cxx_library(
+        name = "utils",
+        srcs = [
+            "UnitTestMain.cpp",
+        ],
+        exported_headers = [
+            "alignment.h",
+            "DeathTest.h",
+        ],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        deps = [
+            "//executorch/compiler:compiler",
+            "//executorch/core:core",
+        ],
+        fbcode_exported_deps = [
+            "//common/init:init",
+            "//common/gtest:gtest",
+        ],
+        xplat_exported_deps = [
+            "//xplat/folly:init_init",
+            "//xplat/third-party/gmock:gmock",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "alignment_test",
+        srcs = [
+            "alignment_test.cpp",
+        ],
+        deps = [
+            ":utils",
+        ],
+    )
diff --git a/threadpool/targets.bzl b/threadpool/targets.bzl
new file mode 100644
index 00000000000..dfeb2951983
--- /dev/null
+++ b/threadpool/targets.bzl
@@ -0,0 +1,43 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    runtime.cxx_library(
+        name = "threadpool",
+        srcs = [
+            "threadpool.cpp",
+            "threadpool_guard.cpp",
+            "fb/thread_pool_use_nthreads.cpp",
+        ],
+        deps = [
+            "//executorch/core:core",
+        ],
+        fbcode_deps = [
+            ":cpuinfo",
+        ],
+        xplat_deps = [
+            "//third-party/cpuinfo:cpuinfo",
+        ],
+        exported_headers = [
+            "threadpool.h",
+            "threadpool_guard.h",
+            "fb/thread_pool_use_nthreads.h",
+        ],
+        exported_deps = [
+            "//xplat/third-party/pthreadpool:pthreadpool",
+        ],
+        exported_preprocessor_flags = [
+            "-DET_USE_THREADPOOL",
+        ],
+        visibility = [
+            "//executorch/...",
+            "//executorch/backends/...",
+            "//executorch/threadpool/test/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
diff --git a/threadpool/test/targets.bzl b/threadpool/test/targets.bzl
new file mode 100644
index 00000000000..bb552ffb7f5
--- /dev/null
+++ b/threadpool/test/targets.bzl
@@ -0,0 +1,18 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+    runtime.cxx_test(
+        name = "ThreadPoolTest",
+        srcs = [
+            "ThreadPoolTest.cpp",
+            "fb/ThreadPoolUseNThreadsTest.cpp",
+        ],
+        deps = [
+            "//executorch/threadpool:threadpool",
+        ],
+    )
diff --git a/util/targets.bzl b/util/targets.bzl
new file mode 100644
index 00000000000..f9f97173d89
--- /dev/null
+++ b/util/targets.bzl
@@ -0,0 +1,213 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+
+    runtime.cxx_library(
+        name = "dynamic_memory_allocator",
+        exported_headers = [
+            "DynamicMemoryAllocator.h",
+        ],
+        exported_deps = [
+            "//executorch/executor:memory_manager",
+        ],
+        visibility = [
+            "//executorch/util/test/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "system",
+        exported_headers = [
+            "system.h",
+        ],
+        visibility = [
+            "//executorch/util/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "test_memory_config",
+        srcs = [],
+        exported_headers = ["TestMemoryConfig.h"],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        deps = [
+            "//executorch/core:core",
+            "//executorch/executor:memory_manager",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "read_file",
+        srcs = ["read_file.cpp"],
+        exported_headers = ["read_file.h"],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            ":system",
+            "//executorch/core:core",
+            "//executorch/compiler:compiler",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "embedded_data_loader",
+        srcs = [],
+        exported_headers = ["embedded_data_loader.h"],
+        visibility = [
+            "//executorch/backends/test/...",
+            "//executorch/executor/test/...",
+            "//executorch/pybindings/...",
+            "//executorch/test/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            "//executorch/core:data_loader",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "shared_ptr_data_loader",
+        srcs = [],
+        exported_headers = ["shared_ptr_data_loader.h"],
+        visibility = [
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            "//executorch/core:data_loader",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "file_data_loader",
+        srcs = ["file_data_loader.cpp"],
+        exported_headers = ["file_data_loader.h"],
+        visibility = [
+            "//executorch/test/...",
+            "//executorch/executor/test/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            "//executorch/core:data_loader",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "mmap_data_loader",
+        srcs = ["mmap_data_loader.cpp"],
+        exported_headers = ["mmap_data_loader.h"],
+        visibility = [
+            "//executorch/test/...",
+            "//executorch/pybindings/...",
+            "//executorch/executor/test/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            "//executorch/core:data_loader",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "memory_utils",
+        srcs = ["memory_utils.cpp"],
+        exported_headers = ["memory_utils.h"],
+        visibility = [
+            "//executorch/backends/...",
+            "//executorch/util/test/...",
+        ],
+        deps = [
+            "//executorch/core:core",
+        ],
+        exported_deps = [
+            ":system",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "aten_bridge",
+        srcs = ["aten_bridge.cpp"],
+        exported_headers = ["aten_bridge.h"],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        deps = [
+            "//executorch/core:core",
+            "//executorch/core/kernel_types:kernel_types",
+        ],
+        fbcode_deps = [
+            "//caffe2:ATen-core",
+            "//caffe2:ATen-cpu",
+            "//caffe2/c10:c10",
+        ],
+        xplat_deps = [
+            "//xplat/caffe2:torch_mobile_core",
+            "//xplat/caffe2/c10:c10",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "ivalue_flatten_unflatten",
+        srcs = ["ivalue_flatten_unflatten.cpp"],
+        exported_headers = ["ivalue_flatten_unflatten.h"],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_deps = [
+            "//executorch/pytree:pytree",
+        ],
+        compiler_flags = ["-Wno-missing-prototypes"],
+        fbcode_deps = [
+            "//caffe2:ATen-core",
+            "//caffe2:ATen-cpu",
+            "//caffe2/c10:c10",
+        ],
+        xplat_deps = [
+            "//xplat/caffe2:torch_mobile_core",
+            "//xplat/caffe2/c10:c10",
+        ],
+    )
+
+    for aten_mode in (True, False):
+        aten_suffix = ("_aten" if aten_mode else "")
+        runtime.cxx_library(
+            name = "bundled_program_verification" + aten_suffix,
+            srcs = ["bundled_program_verification.cpp"],
+            exported_headers = ["bundled_program_verification.h"],
+            visibility = [
+                "//executorch/...",
+                "@EXECUTORCH_CLIENTS",
+            ],
+            deps = [
+                "//executorch/core/kernel_types/testing:tensor_util" + aten_suffix,
+                "//executorch/executor:executor" + aten_suffix,
+                "//executorch/core/kernel_types/util:dim_order_util" + aten_suffix,
+                "//executorch/schema:schema",
+                "//executorch/schema:bundled_program_schema",
+            ],
+        )
+
+        runtime.cxx_library(
+            name = "util" + aten_suffix,
+            srcs = [],
+            exported_headers = ["util.h"],
+            visibility = [
+                "//executorch/...",
+                "@EXECUTORCH_CLIENTS",
+            ],
+            deps = [
+                "//executorch/executor:executor" + aten_suffix,
+            ],
+        )
diff --git a/util/test/targets.bzl b/util/test/targets.bzl
new file mode 100644
index 00000000000..6f290beb3b8
--- /dev/null
+++ b/util/test/targets.bzl
@@ -0,0 +1,121 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Defines targets that should be shared between fbcode and xplat.
+
+    The directory containing this targets.bzl file should also contain both
+    TARGETS and BUCK files that call this function.
+    """
+    runtime.cxx_library(
+        name = "temp_file",
+        srcs = [],
+        exported_headers = ["temp_file.h"],
+        visibility = [],  # Private
+    )
+
+    runtime.cxx_test(
+        name = "temp_file_test",
+        srcs = [
+            "temp_file_test.cpp",
+        ],
+        deps = [
+            ":temp_file",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "ATenBridgeTest",
+        srcs = ["ATenBridgeTest.cpp"],
+        deps = [
+            "//executorch/core:core",
+            "//executorch/core/kernel_types:kernel_types",
+            "//executorch/util:aten_bridge",
+        ],
+        fbcode_deps = [
+            "//caffe2:ATen-core",
+            "//caffe2:ATen-cpu",
+            "//caffe2/c10:c10",
+        ],
+        xplat_deps = [
+            "//xplat/caffe2:torch_mobile_core",
+            "//xplat/caffe2/c10:c10",
+            # Don't really like this, but without it I don't have aten::empty,
+            # and haven't figured out a more minimal target.
+            "//xplat/caffe2:torch_mobile_all_ops",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "dynamic_memory_allocator_test",
+        srcs = [
+            "DynamicMemoryAllocatorTest.cpp",
+        ],
+        deps = [
+            "//executorch/util:dynamic_memory_allocator",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "embedded_data_loader_test",
+        srcs = [
+            "embedded_data_loader_test.cpp",
+        ],
+        deps = [
+            "//executorch/util:embedded_data_loader",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "shared_ptr_data_loader_test",
+        srcs = [
+            "shared_ptr_data_loader_test.cpp",
+        ],
+        deps = [
+            "//executorch/util:shared_ptr_data_loader",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "file_data_loader_test",
+        srcs = [
+            "file_data_loader_test.cpp",
+        ],
+        deps = [
+            ":temp_file",
+            "//executorch/util:file_data_loader",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "mmap_data_loader_test",
+        srcs = [
+            "mmap_data_loader_test.cpp",
+        ],
+        deps = [
+            ":temp_file",
+            "//executorch/util:mmap_data_loader",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "memory_utils_test",
+        srcs = [
+            "memory_utils_test.cpp",
+        ],
+        deps = [
+            "//executorch/compiler:compiler",
+            "//executorch/util:memory_utils",
+        ],
+    )
+
+    runtime.cxx_test(
+        name = "ivalue_flatten_unflatten_test",
+        srcs = ["IvalueFlattenUnflattenTest.cpp"],
+        deps = ["//executorch/util:ivalue_flatten_unflatten"],
+        fbcode_deps = [
+            "//caffe2:torch-cpp",
+        ],
+        xplat_deps = [
+            "//xplat/caffe2:torch_mobile_all_ops",
+        ],
+    )