Skip to content

Commit abb9b7b

Browse files
committed
Add compliance suite skeleton and operator tests
ghstack-source-id: a21d704 ghstack-comment-id: 3003538522 Pull-Request: #11960
1 parent a0d1867 commit abb9b7b

23 files changed

+1292
-0
lines changed
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Operator Compliance Test Suite
2+
3+
This directory contains operator tests that all backends are expected to pass. While not every backend will implement every operator or permutation, the expectation is that backend partitioners will only partition nodes that the backend can support. The partitioner should never error out due to not supporting an input node.
4+
5+
## Backend Registration
6+
7+
To plug into the test framework, each backend should provide an implementation of the Tester class, defined in backends/test/harness/tester.py. Backends can provide implementations of each stage, or use the default implementation, as appropriate.
8+
9+
At a minimum, the backend will likely need to provide a custom implementation of the Partition and ToEdgeTransformAndLower stages using the appropriate backend partitioner. See backends/xnnpack/test/tester/tester.py for an example implementation.
10+
11+
Once a tester is available, the backend flow(s) can be added in __init__.py in this directory by adding an entry to `ALL_TEST_FLOWS`. Each flow entry consists of a name (used in the test case naming) and a function to instantiate a tester for a given model and input tuple.
12+
13+
## Test Cases
14+
15+
Operator test cases are defined under the operators/ directory. Tests are written in a backend-independent manner, and each test is programmatically expanded to generate a variant for each registered backend flow. The `@operator_test` decorator is applied to each test class to trigger this behavior. Tests can also be tagged with an appropriate type specifier, such as `@dtype_test`, to generate variants for each dtype. The decorators and "magic" live in __init__.py in this directory.
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
# Buck build file: pull in the shared target definitions for this directory
# and register them for the fbcode environment.
load(":targets.bzl", "define_common_targets")

define_common_targets(is_fbcode = True)
Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
10+
import logging
11+
import os
12+
import unittest
13+
14+
from enum import Enum
15+
from typing import Any, Callable, Tuple
16+
17+
import torch
18+
from executorch.backends.test.harness import Tester
19+
20+
logger = logging.getLogger(__name__)
21+
logger.setLevel(logging.INFO)
22+
23+
24+
# Backends may be filtered via the ET_TEST_ENABLED_BACKENDS environment
# variable (a comma-separated list). When the variable is unset, all
# backends are enabled (signalled by None).
def get_enabled_backends():
    """Return the list of enabled backend names, or None to enable all."""
    raw_value = os.environ.get("ET_TEST_ENABLED_BACKENDS")
    return raw_value.split(",") if raw_value is not None else None


_ENABLED_BACKENDS = get_enabled_backends()
35+
36+
37+
def is_backend_enabled(backend):
    """Check whether the named backend is enabled for this test run.

    A filter value of None means no filter was specified, so every
    backend is considered enabled.
    """
    return _ENABLED_BACKENDS is None or backend in _ENABLED_BACKENDS
42+
43+
44+
# Registry of (flow_name, tester_factory) pairs, one per enabled backend.
# The flow name is embedded in generated test case names; the factory
# builds a backend tester from a model and its example inputs.
ALL_TEST_FLOWS = []

# Backend imports are guarded so that disabled backends are never imported
# (they may not be installed in every environment).
if is_backend_enabled("xnnpack"):
    from executorch.backends.xnnpack.test.tester import Tester as XnnpackTester

    XNNPACK_TEST_FLOW = ("xnnpack", XnnpackTester)
    ALL_TEST_FLOWS.append(XNNPACK_TEST_FLOW)

if is_backend_enabled("coreml"):
    from executorch.backends.apple.coreml.test.tester import CoreMLTester

    COREML_TEST_FLOW = ("coreml", CoreMLTester)
    ALL_TEST_FLOWS.append(COREML_TEST_FLOW)
57+
58+
59+
# Dtypes exercised by @dtype_test-decorated tests. Backends are not required
# to support every dtype; unsupported dtypes should simply not be partitioned.
DTYPES = [
    torch.int8,
    torch.uint8,
    torch.int16,
    torch.uint16,
    torch.int32,
    torch.uint32,
    torch.int64,
    torch.uint64,
    torch.float16,
    torch.float32,
    torch.float64,
]

# Floating-point subset of DTYPES, for tests that only make sense on floats.
FLOAT_DTYPES = [
    torch.float16,
    torch.float32,
    torch.float64,
]
78+
79+
80+
class TestType(Enum):
    """Controls how a test method is expanded into generated variants."""

    STANDARD = 1  # One variant per backend flow.
    DTYPE = 2  # One variant per (dtype, backend flow) pair.
83+
84+
85+
def dtype_test(func):
    """Mark a test method for expansion across every dtype in DTYPES.

    The marked method is expected to take a dtype as its second argument.
    """
    setattr(func, "test_type", TestType.DTYPE)
    return func
88+
89+
90+
def operator_test(cls):
    """Class decorator: expand each test_* method into per-flow variants."""
    _create_tests(cls)
    return cls
93+
94+
95+
def _create_tests(cls):
    """Expand every test_* method on cls into per-backend-flow variants."""
    # Snapshot the names first: _expand_test mutates the class as it goes.
    test_names = [name for name in dir(cls) if name.startswith("test_")]
    for name in test_names:
        _expand_test(cls, name)
99+
100+
101+
def _expand_test(cls, test_name: str):
    """Replace the named template test with one generated variant per flow."""
    template = getattr(cls, test_name)
    for flow_name, tester_factory in ALL_TEST_FLOWS:
        _create_test_for_backend(cls, template, flow_name, tester_factory)
    # Remove the template so the runner only discovers the expanded variants.
    delattr(cls, test_name)
106+
107+
108+
def _make_wrapped_test(test_func, tester_factory):
109+
def wrapped_test(self):
110+
test_func(self, tester_factory)
111+
112+
return tester_factory
113+
114+
115+
def _make_wrapped_dtype_test(test_func, dtype, tester_factory):
116+
def wrapped_test(self):
117+
test_func(self, dtype, tester_factory)
118+
119+
return wrapped_test
120+
121+
122+
def _create_test_for_backend(
    cls,
    test_func: Callable,
    flow_name: str,
    tester_factory: Callable[[torch.nn.Module, Tuple[Any]], Tester],
):
    """Attach the flow-specific variant(s) of test_func to cls.

    STANDARD tests yield one variant per flow; DTYPE tests yield one
    variant per (dtype, flow) pair.
    """
    test_type = getattr(test_func, "test_type", TestType.STANDARD)

    if test_type == TestType.STANDARD:
        setattr(
            cls,
            f"{test_func.__name__}_{flow_name}",
            _make_wrapped_test(test_func, tester_factory),
        )
    elif test_type == TestType.DTYPE:
        for dtype in DTYPES:
            dtype_name = str(dtype)[6:]  # strip the "torch." prefix
            setattr(
                cls,
                f"{test_func.__name__}_{dtype_name}_{flow_name}",
                _make_wrapped_dtype_test(test_func, dtype, tester_factory),
            )
    else:
        raise NotImplementedError(f"Unknown test type {test_type}.")
142+
143+
144+
class OperatorTest(unittest.TestCase):
    """Base class for operator compliance tests.

    Subclasses call _test_op with a model, example inputs, and a
    backend-specific tester factory (bound in by the @operator_test
    expansion machinery).
    """

    def _test_op(self, model, inputs, tester_factory):
        # Lower the model through export and to_edge_transform_and_lower so
        # the backend partitioner has a chance to claim the op.
        tester = (
            tester_factory(
                model,
                inputs,
            )
            .export()
            .to_edge_transform_and_lower()
        )

        # Detect whether any portion of the graph was delegated by scanning
        # the current stage's graph for a delegate call.
        # NOTE(review): `torch._higher_order_ops.executorch_call_delegate`
        # looks suspect — the op is typically exposed as
        # `torch.ops.higher_order.executorch_call_delegate`; confirm this
        # attribute resolves at runtime.
        is_delegated = any(
            n.target == torch._higher_order_ops.executorch_call_delegate
            for n in tester.stages[tester.cur].graph_module.graph.nodes
            if n.op == "call_function"
        )

        # Only run the runtime test if the op was delegated.
        if is_delegated:
            (tester.to_executorch().serialize().run_method_and_compare_outputs())
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
10+
from typing import Callable
11+
12+
import torch
13+
14+
from executorch.backends.test.compliance_suite import (
15+
dtype_test,
16+
operator_test,
17+
OperatorTest,
18+
)
19+
20+
21+
class Model(torch.nn.Module):
    """Element-wise addition of two tensors."""

    def forward(self, x, y):
        return torch.add(x, y)
24+
25+
26+
class ModelAlpha(torch.nn.Module):
    """Scaled addition: computes x + alpha * y."""

    def __init__(self, alpha):
        super().__init__()
        self.alpha = alpha

    def forward(self, x, y):
        return x.add(y, alpha=self.alpha)
33+
34+
35+
@operator_test
class Add(OperatorTest):
    """Compliance tests for the element-wise add operator."""

    @dtype_test
    def test_add_dtype(self, dtype, tester_factory: Callable) -> None:
        # Scale to [0, 100) before casting so integer dtypes see varied values.
        lhs = (torch.rand(2, 10) * 100).to(dtype)
        rhs = (torch.rand(2, 10) * 100).to(dtype)
        self._test_op(Model(), (lhs, rhs), tester_factory)

    def test_add_f32_bcast_first(self, tester_factory: Callable) -> None:
        # First operand broadcasts against a higher-rank second operand.
        inputs = (torch.randn(5), torch.randn(1, 5, 1, 5))
        self._test_op(Model(), inputs, tester_factory)

    def test_add_f32_bcast_second(self, tester_factory: Callable) -> None:
        # Second operand broadcasts against a higher-rank first operand.
        inputs = (torch.randn(4, 4, 2, 7), torch.randn(2, 7))
        self._test_op(Model(), inputs, tester_factory)

    def test_add_f32_bcast_unary(self, tester_factory: Callable) -> None:
        # Both operands broadcast (5) against (1, 1, 5).
        inputs = (torch.randn(5), torch.randn(1, 1, 5))
        self._test_op(Model(), inputs, tester_factory)

    def test_add_f32_alpha(self, tester_factory: Callable) -> None:
        # Exercise the alpha (scale) argument of torch.add.
        inputs = (torch.randn(1, 25), torch.randn(1, 25))
        self._test_op(ModelAlpha(alpha=2), inputs, tester_factory)
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
10+
from typing import Callable, Optional
11+
12+
import torch
13+
14+
from executorch.backends.test.compliance_suite import (
15+
dtype_test,
16+
operator_test,
17+
OperatorTest,
18+
)
19+
20+
21+
class Model(torch.nn.Module):
    """Element-wise (true) division of two tensors."""

    def forward(self, x, y):
        return torch.div(x, y)
24+
25+
26+
class ModelWithRounding(torch.nn.Module):
    """Division with an explicit rounding mode ("trunc", "floor", or None)."""

    def __init__(self, rounding_mode: Optional[str]):
        super().__init__()
        self.rounding_mode = rounding_mode

    def forward(self, x, y):
        return x.div(y, rounding_mode=self.rounding_mode)
33+
34+
35+
@operator_test
class Divide(OperatorTest):
    """Compliance tests for the element-wise divide operator."""

    @dtype_test
    def test_divide_dtype(self, dtype, tester_factory: Callable) -> None:
        numerator = (torch.rand(2, 10) * 100).to(dtype)
        # Offset by 0.1 so the denominator is never zero.
        denominator = (torch.rand(2, 10) * 100 + 0.1).to(dtype)
        self._test_op(Model(), (numerator, denominator), tester_factory)

    def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None:
        # abs() + 0.1 keeps the denominator strictly positive (nonzero).
        inputs = (torch.randn(5), torch.randn(1, 5, 1, 5).abs() + 0.1)
        self._test_op(Model(), inputs, tester_factory)

    def test_divide_f32_bcast_second(self, tester_factory: Callable) -> None:
        # abs() + 0.1 keeps the denominator strictly positive (nonzero).
        inputs = (torch.randn(4, 4, 2, 7), torch.randn(2, 7).abs() + 0.1)
        self._test_op(Model(), inputs, tester_factory)

    def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None:
        # abs() + 0.1 keeps the denominator strictly positive (nonzero).
        inputs = (torch.randn(5), torch.randn(1, 1, 5).abs() + 0.1)
        self._test_op(Model(), inputs, tester_factory)

    def test_divide_f32_trunc(self, tester_factory: Callable) -> None:
        # Truncating division: rounds the quotient toward zero.
        inputs = (torch.randn(3, 4) * 10, torch.randn(3, 4).abs() + 0.1)
        self._test_op(
            ModelWithRounding(rounding_mode="trunc"), inputs, tester_factory
        )

    def test_divide_f32_floor(self, tester_factory: Callable) -> None:
        # Flooring division: rounds the quotient toward negative infinity.
        inputs = (torch.randn(3, 4) * 10, torch.randn(3, 4).abs() + 0.1)
        self._test_op(
            ModelWithRounding(rounding_mode="floor"), inputs, tester_factory
        )

0 commit comments

Comments
 (0)