# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import typing
from typing import Dict, List, Optional, Sequence, Type, Union
import executorch.devtools.bundled_program.schema as bp_schema
import executorch.exir.schema as core_schema
import torch
import torch.fx
from executorch.devtools.bundled_program.config import ConfigValue, MethodTestSuite
from executorch.devtools.bundled_program.version import BUNDLED_PROGRAM_SCHEMA_VERSION
from executorch.exir import ExecutorchProgram, ExecutorchProgramManager
from executorch.exir.tensor import get_scalar_type, scalar_type_enum, TensorSpec
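# Maps the core program value types that bundled test cases may carry to the
# corresponding Python/torch types expected for bundled inputs.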
# pyre-ignore
supported_program_type_table: Dict[Type[core_schema.KernelTypes], ConfigValue] = {
core_schema.Tensor: torch.Tensor,
core_schema.Int: int,
core_schema.Double: float,
core_schema.Bool: bool,
}
class BundledProgram:
"""
Bundled program contains all information needed to execute and verify the program on device.
Public Attributes:
method_test_suites: All test suites for verifying methods.
        executorch_program: ExecutorchProgram-like variable containing the Program to be verified by method_test_suites;
            either an ExecutorchProgram or an ExecutorchProgramManager.
"""
def __init__(
self,
executorch_program: Optional[
Union[
ExecutorchProgram,
ExecutorchProgramManager,
]
],
method_test_suites: Sequence[MethodTestSuite],
pte_file_path: Optional[str] = None,
):
"""Create BundledProgram by bundling the given program and method_test_suites together.
Args:
executorch_program: The program to be bundled.
            method_test_suites: The test cases for the methods to be bundled.
            pte_file_path: The path to the .pte file from which to load the program if executorch_program is not provided.
"""
if not executorch_program and not pte_file_path:
raise RuntimeError(
"Either executorch_program or pte_file_path must be provided"
)
if executorch_program and pte_file_path:
raise RuntimeError(
"Only one of executorch_program or pte_file_path can be used"
)
method_test_suites = sorted(method_test_suites, key=lambda x: x.method_name)
if executorch_program:
self._assert_valid_bundle(executorch_program, method_test_suites)
self.executorch_program: Optional[
Union[
ExecutorchProgram,
ExecutorchProgramManager,
]
] = executorch_program
self._pte_file_path: Optional[str] = pte_file_path
self.method_test_suites = method_test_suites
        # This is the cache for the bundled program in schema type.
        # Users should not access this field directly; use the `serialize_to_schema` function instead.
self._bundled_program_in_schema: Optional[bp_schema.BundledProgram] = None
def serialize_to_schema(self) -> bp_schema.BundledProgram:
"""Serialize the current Bundled Program into its schema format for further serialization.."""
# Return cached value if exists
if self._bundled_program_in_schema is not None:
return self._bundled_program_in_schema
bundled_method_test_suites: List[bp_schema.BundledMethodTestSuite] = []
# Emit data and metadata of bundled tensor
for method_test_suite in self.method_test_suites:
bundled_test_cases: List[bp_schema.BundledMethodTestCase] = []
# emit I/O sets for each method test case
for i in range(len(method_test_suite.test_cases)):
inputs: List[bp_schema.Value] = []
expected_outputs: List[bp_schema.Value] = []
cur_plan_test_inputs = method_test_suite.test_cases[i].inputs
cur_plan_test_expected_outputs = method_test_suite.test_cases[
i
].expected_outputs
for input_val in cur_plan_test_inputs:
if type(input_val) is torch.Tensor:
self._emit_bundled_tensor(
TensorSpec.from_tensor(input_val, const=True),
inputs,
)
else:
self._emit_prim(
input_val,
inputs,
)
for expected_output_tensor in cur_plan_test_expected_outputs:
assert (
type(expected_output_tensor) is torch.Tensor
), "Only tensor outputs are currently supported."
self._emit_bundled_tensor(
TensorSpec.from_tensor(expected_output_tensor, const=True),
expected_outputs,
)
bundled_test_cases.append(
bp_schema.BundledMethodTestCase(
inputs=inputs, expected_outputs=expected_outputs
)
)
# emit the whole execution plan test
bundled_method_test_suites.append(
bp_schema.BundledMethodTestSuite(
method_name=method_test_suite.method_name,
test_cases=bundled_test_cases,
)
)
if self.executorch_program:
program_bytes = self.executorch_program.buffer
else:
assert self._pte_file_path is not None
with open(self._pte_file_path, "rb") as f:
program_bytes = f.read()
self._bundled_program_in_schema = bp_schema.BundledProgram(
version=BUNDLED_PROGRAM_SCHEMA_VERSION,
method_test_suites=bundled_method_test_suites,
program=program_bytes,
)
return self._bundled_program_in_schema
def _emit_bundled_tensor(
self, spec: TensorSpec, bundled_values: List[bp_schema.Value]
) -> None:
        # QuantizedSchema in tensor has been deprecated and may no longer be used,
        # so we don't emit it here.
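        # A zero-size tensor has no backing storage, so its payload is an empty byte
        # string; otherwise the raw bytes are copied out of the tensor's untyped
        # storage via ctypes.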
if spec.allocated_memory == 0:
tensor_data: bytes = b""
else:
array_type = (
ctypes.c_char * typing.cast(torch.UntypedStorage, spec.storage).nbytes()
)
spec_array = ctypes.cast(
typing.cast(torch.UntypedStorage, spec.storage).data_ptr(),
ctypes.POINTER(array_type),
).contents
tensor_data: bytes = bytes(spec_array)
bundled_values.append(
bp_schema.Value(
val=bp_schema.Tensor(
scalar_type=scalar_type_enum(spec.dtype),
sizes=spec.shape,
data=tensor_data,
dim_order=list(spec.dim_order),
),
)
)
def _emit_prim(self, val: ConfigValue, bundled_values: List[bp_schema.Value]):
if type(val) is int:
bundled_values.append(bp_schema.Value(val=bp_schema.Int(int_val=val)))
elif type(val) is bool:
bundled_values.append(bp_schema.Value(val=bp_schema.Bool(bool_val=val)))
elif type(val) is float:
bundled_values.append(bp_schema.Value(val=bp_schema.Double(double_val=val)))
else:
assert 0, "Unsupported primitive type received."
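    # The helpers below resolve an execution plan's i-th input/output index into the
    # concrete value stored in the plan's `values` table, and look up its dtype/type.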
def _get_program_input(
self, program: core_schema.Program, plan_idx: int, input_idx: int
) -> core_schema.KernelTypes:
return (
program.execution_plan[plan_idx]
.values[program.execution_plan[plan_idx].inputs[input_idx]]
.val
)
def _get_program_output(
self, program: core_schema.Program, plan_idx: int, output_idx: int
) -> core_schema.KernelTypes:
return (
program.execution_plan[plan_idx]
.values[program.execution_plan[plan_idx].outputs[output_idx]]
.val
)
def _get_input_dtype(
self, program: core_schema.Program, plan_idx: int, input_idx: int
) -> torch.dtype:
return get_scalar_type(
            # pyre-fixme[16]: currently assumes all inputs and outputs are of tensor type; support multiple datatypes in the future.
self._get_program_input(program, plan_idx, input_idx).scalar_type
)
def _get_input_type(
self, program: core_schema.Program, plan_idx: int, input_idx: int
) -> type:
type_lookup = {
core_schema.Int: int,
core_schema.Bool: bool,
core_schema.Double: float,
}
# pyre-fixme[6]: Incompatible parameter type [6]: In call `dict.__getitem__`, for 1st positional only parameter
# expected `Type[Union[core_schema.Bool, core_schema.Double, core_schema.Int]]` but got `Type[Union[core_schema.Bool, core_schema.Double, core_schema.Int, core_schema.Tensor, BoolList, DoubleList,
# IntList, Null, OptionalTensorList, String, TensorList]]`.
return type_lookup[type(self._get_program_input(program, plan_idx, input_idx))]
def _get_output_dtype(
self, program: core_schema.Program, plan_idx: int, output_idx: int
) -> torch.dtype:
return get_scalar_type(
            # pyre-ignore[16]: currently assumes all outputs are of tensor type.
self._get_program_output(program, plan_idx, output_idx).scalar_type
)
def _assert_valid_bundle(
self,
executorch_program: Union[
ExecutorchProgram,
ExecutorchProgramManager,
],
method_test_suites: Sequence[MethodTestSuite],
) -> None:
"""Check if the program and method_test_suites matches each other.
Other checks not related to correspondence are done in config.py
Args:
executorch_program: The program to be bundled.
            method_test_suites: The test cases for the specific methods to be bundled.
"""
program = self._extract_program(executorch_program)
method_name_of_program = {e.name for e in program.execution_plan}
method_name_of_test_suites = {t.method_name for t in method_test_suites}
assert method_name_of_test_suites.issubset(
method_name_of_program
), f"All method names in bundled config should be found in program.execution_plan, \
but {str(method_name_of_test_suites - method_name_of_program)} does not include."
# check if method_test_suites has been sorted in ascending alphabetical order of method name.
for test_suite_id in range(1, len(method_test_suites)):
assert (
method_test_suites[test_suite_id - 1].method_name
<= method_test_suites[test_suite_id].method_name
), f"The method name of test suite should be sorted in ascending alphabetical \
order of method name, but {test_suite_id-1}-th and {test_suite_id}-th method_test_suite aren't."
        # Check if the inputs' types meet the Program's requirements
for method_test_suite in method_test_suites:
            # Get the method with the same method name as method_test_suite
program_plan_id = -1
for plan in program.execution_plan:
if plan.name == method_test_suite.method_name:
program_plan_id = program.execution_plan.index(plan)
break
            # Raise an AssertionError if no method with the same method_name as method_test_suite can be found in the program.
assert (
program_plan_id != -1
), f"method_test_suites has testcases for method {method_test_suite.method_name}, but can not find it in the given program. All method names in the program are {', '.join([p.name for p in program.execution_plan])}."
plan = program.execution_plan[program_plan_id]
            # Check if the types of the Program's inputs are supported
for index in range(len(plan.inputs)):
assert (
type(self._get_program_input(program, program_plan_id, index))
in supported_program_type_table
), "The type of program's input isn't supported."
# Check if the type of Program's output is supported
for index in range(len(plan.outputs)):
assert (
type(self._get_program_output(program, program_plan_id, index))
== core_schema.Tensor
), "Only supports program with output in Tensor type."
# Check if the I/O sets of each execution plan test match program's requirement.
for i in range(len(method_test_suite.test_cases)):
cur_plan_test_inputs = method_test_suite.test_cases[i].inputs
cur_plan_test_expected_outputs = method_test_suite.test_cases[
i
].expected_outputs
assert len(plan.inputs) == len(
cur_plan_test_inputs
), "The number of input in each bundled set and Program shall equal, but get {} and {}".format(
len(plan.inputs),
len(cur_plan_test_inputs),
)
                # Check if each bundled input in the current execution plan test shares the same type as the corresponding input in the Program
for j in range(len(cur_plan_test_inputs)):
assert (
type(cur_plan_test_inputs[j])
is supported_program_type_table[
type(self._get_program_input(program, program_plan_id, j))
]
), "The type {}-th input in {}-th test set of {}-th execution plan does not meet Program's requirement: expected {} but get {}".format(
j,
i,
program_plan_id,
supported_program_type_table[
type(self._get_program_input(program, program_plan_id, j))
],
type(cur_plan_test_inputs[j]),
)
# type of tensor input should match execution plan
if type(cur_plan_test_inputs[j]) is torch.Tensor:
# pyre-fixme[16]: Undefined attribute [16]: Item `bool` of `typing.Union[bool, float, int, torch._tensor.Tensor]`
# has no attribute `dtype`.
assert cur_plan_test_inputs[j].dtype == self._get_input_dtype(
program, program_plan_id, j
), "The input tensor {} dtype shall be {}, but now is {}".format(
cur_plan_test_inputs[j],
self._get_input_dtype(program, program_plan_id, j),
cur_plan_test_inputs[j].dtype,
)
elif type(cur_plan_test_inputs[j]) in (
int,
bool,
float,
):
assert type(cur_plan_test_inputs[j]) is self._get_input_type(
program, program_plan_id, j
), "The input primitive dtype shall be {}, but now is {}".format(
self._get_input_type(program, program_plan_id, j),
type(cur_plan_test_inputs[j]),
)
                # Check if each bundled expected output in the current execution plan test shares the same type as the corresponding output in the Program
for j in range(len(cur_plan_test_expected_outputs)):
assert (
type(cur_plan_test_expected_outputs[j]) is torch.Tensor
), "The {}-th expected output shall be a tensor, but now is {}".format(
j, type(cur_plan_test_expected_outputs[j])
)
# pyre-fixme[16]: Undefined attribute [16]: Item `bool` of `typing.Union[bool, float, int, torch._tensor.Tensor]`
# has no attribute `dtype`.
assert cur_plan_test_expected_outputs[
j
].dtype == self._get_output_dtype(
program, program_plan_id, j
), "The label tensor {} dtype shall be {}, but now is {}".format(
cur_plan_test_expected_outputs[j],
self._get_output_dtype(program, program_plan_id, j),
cur_plan_test_expected_outputs[j].dtype,
)
def _extract_program(
self,
executorch_program: Union[
ExecutorchProgram,
ExecutorchProgramManager,
],
):
if isinstance(executorch_program, ExecutorchProgramManager):
program = executorch_program.executorch_program
else:
assert isinstance(executorch_program, ExecutorchProgram)
program = executorch_program.program
return program
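

# A typical end-to-end flow, sketched as a comment. The serialize helper named below
# reflects executorch.devtools.bundled_program.serialize in recent ExecuTorch releases;
# verify it against your installed version. `program_manager` and `test_suites` are
# placeholders.
#
#     from executorch.devtools.bundled_program.serialize import (
#         serialize_from_bundled_program_to_flatbuffer,
#     )
#
#     bundled = BundledProgram(
#         executorch_program=program_manager,  # an ExecutorchProgramManager
#         method_test_suites=test_suites,      # a list of MethodTestSuite
#     )
#     flatbuffer_bytes = serialize_from_bundled_program_to_flatbuffer(bundled)
#     with open("model_bundled.bpte", "wb") as f:
#         f.write(flatbuffer_bytes)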