diff --git a/.gitignore b/.gitignore
index aa0685fb..a29fb069 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,7 @@
 xcuserdata/
 .swiftpm/
 *.xcworkspace/
+
+# MV2
+mv2/cpp/build
+
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..24dcd6b7
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "mv2/cpp/executorch"]
+	path = mv2/cpp/executorch
+	url = https://github.com/pytorch/executorch.git
+	branch = release/0.6
diff --git a/mv2/cpp/CMakeLists.txt b/mv2/cpp/CMakeLists.txt
new file mode 100644
index 00000000..fb1ef99a
--- /dev/null
+++ b/mv2/cpp/CMakeLists.txt
@@ -0,0 +1,40 @@
+cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
+project(executorch_mv2_demo CXX)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Set options for the ExecuTorch build.
+option(EXECUTORCH_ENABLE_LOGGING "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_MODULE "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_TENSOR "" ON)
+option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED "" ON)
+option(EXECUTORCH_BUILD_XNNPACK "" ON)
+
+# Add the ExecuTorch subdirectory
+add_subdirectory("executorch")
+
+set(DEMO_SOURCES main.cpp)
+
+# Create the executable
+add_executable(executorch_mv2_demo_app ${DEMO_SOURCES})
+
+# Include directories
+target_include_directories(executorch_mv2_demo_app PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+
+# Link libraries
+target_link_libraries(
+  executorch_mv2_demo_app
+  PRIVATE executorch
+          extension_module_static
+          extension_tensor
+          optimized_native_cpu_ops_lib
+          xnnpack_backend
+)
+
+# Set the output directory
+set_target_properties(executorch_mv2_demo_app
+  PROPERTIES
+  RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
+)
diff --git a/mv2/cpp/README.md b/mv2/cpp/README.md
new file mode 100644
index 00000000..e827c3be
--- /dev/null
+++ b/mv2/cpp/README.md
@@ -0,0 +1,44 @@
+# ExecuTorch MobileNetV2 Demo C++ Application
+
+This is a simple C++ demo application that uses the ExecuTorch library for MobileNetV2 model inference.
+
+## Build instructions
+
+0. Export the model. See [mv2/python/README.md](../python/README.md).
+
+1. The ExecuTorch repository is already configured as a git submodule at `~/executorch-examples/mv2/cpp/executorch/`. To initialize it:
+   ```bash
+   cd ~/executorch-examples/
+   git submodule sync
+   git submodule update --init --recursive
+   ```
+
+2. Install the dev requirements for ExecuTorch:
+
+   ```bash
+   cd ~/executorch-examples/mv2/cpp/executorch
+   pip install -r requirements-dev.txt
+   ```
+
+3. Build the project:
+   ```bash
+   cd ~/executorch-examples/mv2/cpp
+   chmod +x build.sh
+   ./build.sh
+   ```
+
+4. Run the demo application:
+   ```bash
+   ./build/bin/executorch_mv2_demo_app
+   ```
+
+## Dependencies
+
+- CMake 3.18 or higher
+- C++17-compatible compiler
+- ExecuTorch library (release/0.6)
+
+## Notes
+
+- Make sure you have the correct model file (`.pte`) compatible with ExecuTorch.
+- This demo currently initializes the input tensor with random data. In a real application, you would replace this with actual input data.
\ No newline at end of file
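The note above about random input data is worth making concrete. Below is a minimal sketch (not part of this patch) of how the demo's input buffer could be filled with real data instead, assuming a preprocessed image has already been saved as 1×3×224×224 raw `float` values in CHW order to a file such as `input.bin`; the file name, format, and `load_input` helper are illustrative assumptions.

```cpp
#include <cstddef>
#include <fstream>
#include <iostream>
#include <vector>

// Read `num_elements` raw floats from `path` into `buffer`.
// Returns false if the file is missing or shorter than expected.
bool load_input(const char* path, float* buffer, std::size_t num_elements) {
  std::ifstream file(path, std::ios::binary);
  if (!file) {
    std::cerr << "Could not open " << path << std::endl;
    return false;
  }
  file.read(reinterpret_cast<char*>(buffer),
            static_cast<std::streamsize>(num_elements * sizeof(float)));
  // gcount() reports how many bytes were actually read.
  return static_cast<std::size_t>(file.gcount()) == num_elements * sizeof(float);
}

int main() {
  std::vector<float> input(1 * 3 * 224 * 224);
  if (!load_input("input.bin", input.data(), input.size())) {
    return 1;
  }
  std::cout << "Loaded " << input.size() << " floats" << std::endl;
  return 0;
}
```

The `float input[1 * 3 * 224 * 224]` buffer in `main.cpp` below could be filled the same way before it is wrapped with `from_blob`.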
diff --git a/mv2/cpp/build.sh b/mv2/cpp/build.sh
new file mode 100755
index 00000000..1672afd4
--- /dev/null
+++ b/mv2/cpp/build.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+
+# Create the build directory if it doesn't exist
+mkdir -p build
+cd build
+
+# Configure CMake
+cmake -DCMAKE_BUILD_TYPE=Release ..
+
+# Build the project
+cmake --build . -j$(nproc)
+
+echo "Build complete! Executable located at: ./bin/executorch_mv2_demo_app"
diff --git a/mv2/cpp/executorch b/mv2/cpp/executorch
new file mode 160000
index 00000000..9820413e
--- /dev/null
+++ b/mv2/cpp/executorch
@@ -0,0 +1 @@
+Subproject commit 9820413e60970d6aa03bba7d5a8b5223804d1955
diff --git a/mv2/cpp/main.cpp b/mv2/cpp/main.cpp
new file mode 100644
index 00000000..d553257c
--- /dev/null
+++ b/mv2/cpp/main.cpp
@@ -0,0 +1,23 @@
+#include <executorch/extension/module/module.h>
+#include <executorch/extension/tensor/tensor.h>
+#include <iostream>
+
+using namespace ::executorch::extension;
+
+int main(int argc, char* argv[]) {
+  // Load the model.
+  Module module("../python/model_mv2_xnnpack.pte");
+
+  // Create an input tensor.
+  float input[1 * 3 * 224 * 224];
+  auto tensor = from_blob(input, {1, 3, 224, 224});
+
+  // Perform an inference.
+  const auto result = module.forward(tensor);
+
+  if (result.ok()) {
+    // Retrieve the output data.
+    const auto output = result->at(0).toTensor().const_data_ptr<float>();
+    std::cout << "Success" << std::endl;
+  }
+}
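`main.cpp` above only prints "Success" after `forward` returns; the `output` pointer it retrieves is otherwise unused. Below is a hedged sketch of how those values could be turned into a top-1 prediction, assuming the usual MobileNetV2 output of 1000 float logits; the `argmax` helper and the stand-in values are illustrative, not part of this patch.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>

// Index of the largest logit, i.e. the top-1 class.
std::size_t argmax(const float* logits, std::size_t count) {
  return static_cast<std::size_t>(
      std::max_element(logits, logits + count) - logits);
}

int main() {
  // Stand-in logits; in the demo, the `output` pointer returned by
  // const_data_ptr<float>() would be passed here with count = 1000 (assumed).
  float logits[1000] = {0.0f};
  logits[283] = 5.0f;  // pretend class 283 scores highest
  std::cout << "Top-1 class index: " << argmax(logits, 1000) << std::endl;
  return 0;
}
```

Handling the `!result.ok()` branch explicitly, for example by returning a non-zero exit code, would also make failures visible instead of exiting silently.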
module") +print(torch.allclose(output[0], eager_reference_output, rtol=1e-3, atol=1e-5))