[Backend Tester] Add CoreML tester implementation #11959

Merged

merged 1 commit into from Jul 8, 2025
Changes from all commits
61 changes: 61 additions & 0 deletions backends/apple/coreml/test/tester.py
@@ -0,0 +1,61 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, List, Optional, Tuple

import executorch
import executorch.backends.test.harness.stages as BaseStages

import torch
from executorch.backends.apple.coreml.partition import CoreMLPartitioner
from executorch.backends.test.harness import Tester as TesterBase
from executorch.backends.test.harness.stages import StageType
from executorch.exir import EdgeCompileConfig
from executorch.exir.backend.partitioner import Partitioner


class Partition(BaseStages.Partition):
    def __init__(self, partitioner: Optional[Partitioner] = None):
        super().__init__(
            partitioner=partitioner or CoreMLPartitioner(),
        )


class ToEdgeTransformAndLower(BaseStages.ToEdgeTransformAndLower):
    def __init__(
        self,
        partitioners: Optional[List[Partitioner]] = None,
        edge_compile_config: Optional[EdgeCompileConfig] = None,
    ):
        super().__init__(
            default_partitioner_cls=CoreMLPartitioner,
            partitioners=partitioners,
            edge_compile_config=edge_compile_config,
        )


class CoreMLTester(TesterBase):
    def __init__(
        self,
        module: torch.nn.Module,
        example_inputs: Tuple[torch.Tensor],
        dynamic_shapes: Optional[Tuple[Any]] = None,
    ):
        # Specialize stages for Core ML.
        stage_classes = (
            executorch.backends.test.harness.Tester.default_stage_classes()
            | {
                StageType.PARTITION: Partition,
                StageType.TO_EDGE_TRANSFORM_AND_LOWER: ToEdgeTransformAndLower,
            }
        )

        super().__init__(
            module=module,
            stage_classes=stage_classes,
            example_inputs=example_inputs,
            dynamic_shapes=dynamic_shapes,
        )
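
For context, here is a minimal usage sketch of the new tester (not part of this diff). The example module and inputs are hypothetical, and the fluent stage methods are assumed to match those exposed by the shared backend test harness, as with the other backend testers:

import torch

from executorch.backends.apple.coreml.test.tester import CoreMLTester


class AddOne(torch.nn.Module):
    def forward(self, x):
        return x + 1.0


# Hypothetical module and inputs; any Core ML-compatible model would do.
(
    CoreMLTester(AddOne().eval(), (torch.randn(1, 8),))
    .export()
    .to_edge_transform_and_lower()
    .to_executorch()
    .serialize()
    .run_method_and_compare_outputs()
)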
17 changes: 13 additions & 4 deletions backends/test/operators/test_facto.py
@@ -4,7 +4,16 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

-# pyre-strict
+# pyre-unsafe

+#
+# This file contains logic to run generated operator tests using the FACTO
+# library (https://github.com/pytorch-labs/FACTO). To run the tests, first
+# clone and install FACTO by running pip install . from the FACTO source
+# directory. Then, from the executorch root directory, run the following:
+#
+# python -m unittest backends.test.operators.test_facto.FactoTestsXNNPACK
+#

import copy
import functools
@@ -26,9 +35,9 @@
CombinedSpecDB = SpecDictDB | ExtraSpecDB

COMMON_TENSOR_CONSTRAINTS = [
-    cp.Rank.Ge(lambda deps: 1),
+    cp.Rank.Ge(lambda deps: 1),  # Avoid zero and high rank tensors.
    cp.Rank.Le(lambda deps: 4),
-    cp.Size.Ge(lambda deps, r, d: 1),
+    cp.Size.Ge(lambda deps, r, d: 1),  # Keep sizes reasonable.
    cp.Size.Le(lambda deps, r, d: 2**9),
]

@@ -171,7 +180,7 @@ def get_runtime_input_count(spec: Spec):
    def setUp(self):
        torch.set_printoptions(threshold=3)

-    def _test_op(self, op: OpOverload) -> None:  # noqa
+    def _test_op(self, op: OpOverload) -> None:  # noqa: C901
        random_manager.seed(0)

        # Strip namespace
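
For reference, the FACTO tests touched in test_facto.py can also be driven programmatically rather than via python -m unittest. This is a sketch under the assumptions stated in the file's header comment (FACTO installed, run from the executorch repo root), plus the assumption that FactoTestsXNNPACK is a standard unittest.TestCase subclass:

import unittest

from backends.test.operators.test_facto import FactoTestsXNNPACK

# Load and run the XNNPACK FACTO test case with the stock unittest runner.
suite = unittest.defaultTestLoader.loadTestsFromTestCase(FactoTestsXNNPACK)
unittest.TextTestRunner(verbosity=2).run(suite)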