# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import logging
import operator
from typing import Optional

import executorch.exir.schema as schema

import torch
from executorch.exir.delegate import executorch_call_delegate
from executorch.exir.lowered_backend_module import LoweredBackendModule
from executorch.exir.tensor import TensorSpec
from torch.fx.passes.infra.pass_base import PassBase, PassResult

logger: logging.Logger = logging.getLogger(__name__)

# CompileSpec key convention for specifying the target device.
# Partitioners that target a specific device should include a CompileSpec entry
# with this key and a value encoding the device string (e.g., b"cuda:0").
TARGET_DEVICE_COMPILE_SPEC_KEY: str = "target_device"
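
# Example (hypothetical partitioner snippet) attaching this spec, assuming the
# standard CompileSpec(key, value) dataclass from
# executorch.exir.backend.compile_spec_schema:
#
#     compile_specs = [CompileSpec(TARGET_DEVICE_COMPILE_SPEC_KEY, b"cuda:0")]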


def _parse_device_spec_value(value: bytes) -> tuple[schema.DeviceType, int]:
    """
    Parse a target_device CompileSpec value (e.g., b"cuda:0") into
    (DeviceType, device_index).

    The type portion is matched case-insensitively against schema.DeviceType
    member names (e.g., "cpu", "cuda"). Raises ValueError for unknown types.
    """
    device_str = value.decode("utf-8").strip().lower()
    if ":" in device_str:
        type_str, index_str = device_str.split(":", 1)
        device_index = int(index_str)
    else:
        type_str = device_str
        device_index = 0
    device_type = next(
        (dt for dt in schema.DeviceType if dt.name.lower() == type_str),
        None,
    )
    if device_type is None:
        valid = ", ".join(dt.name for dt in schema.DeviceType)
        raise ValueError(f"Unknown device type '{type_str}'. Valid types: {valid}")
    return device_type, device_index
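
# Behavior sketch (assumes schema.DeviceType defines CPU and CUDA members):
#     _parse_device_spec_value(b"cuda:1") -> (schema.DeviceType.CUDA, 1)
#     _parse_device_spec_value(b"cpu")    -> (schema.DeviceType.CPU, 0)
#     _parse_device_spec_value(b"tpu")    -> ValueError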


def _get_lowered_module(
    graph_module: torch.fx.GraphModule,
    delegate_call_node: torch.fx.Node,
) -> Optional[LoweredBackendModule]:
    """
    Given an executorch_call_delegate node, retrieve the associated
    LoweredBackendModule from the graph module.

    The first argument to executorch_call_delegate is a get_attr node
    whose target names the LoweredBackendModule attribute.
    """
    if len(delegate_call_node.args) < 1:
        return None
    lowered_node = delegate_call_node.args[0]
    if not isinstance(lowered_node, torch.fx.Node) or lowered_node.op != "get_attr":
        return None
    lowered_module = getattr(graph_module, lowered_node.target, None)
    if isinstance(lowered_module, LoweredBackendModule):
        return lowered_module
    return None
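
# Illustrative FX graph fragment that _get_lowered_module matches; the node and
# attribute names here are hypothetical:
#     %lowered_module_0 : get_attr[target=lowered_module_0]
#     %delegate_out : call_function[target=executorch_call_delegate](
#         args = (%lowered_module_0, %x))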


def _get_target_device_from_compile_specs(
    lowered_module: LoweredBackendModule,
) -> Optional[tuple[schema.DeviceType, int]]:
    """
    Look for a CompileSpec with key TARGET_DEVICE_COMPILE_SPEC_KEY and return
    the corresponding (DeviceType, device_index), or None if not found.
    """
    for spec in lowered_module.compile_specs:
        if spec.key == TARGET_DEVICE_COMPILE_SPEC_KEY:
            return _parse_device_spec_value(spec.value)
    return None


def _set_device_on_spec(
    spec: TensorSpec,
    device_type: schema.DeviceType,
    device_index: int = 0,
) -> None:
    """Set the device attribute on a TensorSpec."""
    spec.device = device_type
    spec.device_index = device_index


def _tag_specs_with_device(
    specs: object,
    device_type: schema.DeviceType,
    device_index: int = 0,
) -> bool:
    """Apply device annotation to a TensorSpec or a collection of TensorSpecs.

    Args:
        specs: A TensorSpec, a tuple/list of TensorSpecs, or None.
        device_type: The target device type to set.
        device_index: The device index (e.g., 0 for cuda:0, 1 for cuda:1).

    Returns:
        True if any spec was modified, False otherwise.
    """
    if specs is None:
        return False
    if isinstance(specs, TensorSpec):
        _set_device_on_spec(specs, device_type, device_index)
        return True
    if isinstance(specs, (tuple, list)):
        changed = False
        for s in specs:
            if isinstance(s, TensorSpec):
                _set_device_on_spec(s, device_type, device_index)
                changed = True
        return changed
    return False
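
# Behavior sketch for _tag_specs_with_device:
#     _tag_specs_with_device(None, dt)      -> False (nothing to tag)
#     _tag_specs_with_device(one_spec, dt)  -> True  (tags the single spec)
#     _tag_specs_with_device([s1, s2], dt)  -> True  (tags each TensorSpec)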


class PropagateDevicePass(PassBase):
    """
    After to_backend, walk the graph and set device metadata on TensorSpecs
    based on partitioner-assigned delegation info.

    Rules:
    1. Delegated nodes: Input and output tensors of a delegate call are marked
       with the target device derived from the delegate's CompileSpec
       (key="target_device").
    2. Non-delegated nodes: Remain on CPU (default).
    3. Getitem nodes that extract from a delegate call inherit the device from
       the delegate call's output spec at the corresponding index.
    """
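
    # Hypothetical usage sketch; PassBase.__call__ wraps call() and returns a
    # PassResult whose .graph_module carries the updated graph:
    #     result = PropagateDevicePass()(edge_graph_module)
    #     edge_graph_module = result.graph_module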

    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
        changed = False
        for node in graph_module.graph.nodes:
            if node.op == "call_function" and node.target == executorch_call_delegate:
                lowered_module = _get_lowered_module(graph_module, node)
                if lowered_module is None:
                    continue

                result = _get_target_device_from_compile_specs(lowered_module)
                if result is None:
                    continue

                target_device_type, device_index = result

                # Tag delegate input tensors.
                # args[0] is the get_attr node for the lowered module; skip it.
                for arg in node.args[1:]:
                    if isinstance(arg, torch.fx.Node):
                        changed |= _tag_specs_with_device(
                            arg.meta.get("spec"),
                            target_device_type,
                            device_index,
                        )

                # Tag delegate output tensors.
                changed |= _tag_specs_with_device(
                    node.meta.get("spec"),
                    target_device_type,
                    device_index,
                )

                logger.debug(
                    "PropagateDevicePass: set device=%s on delegate node %s "
                    "(backend=%s)",
                    target_device_type,
                    node.name,
                    lowered_module.backend_id,
                )

        # Second pass: propagate device through getitem nodes that extract
        # individual outputs from a delegate call.
        for node in graph_module.graph.nodes:
            if node.op == "call_function" and node.target is operator.getitem:
                source_node = node.args[0]
                if (
                    isinstance(source_node, torch.fx.Node)
                    and source_node.op == "call_function"
                    and source_node.target == executorch_call_delegate
                ):
                    spec = node.meta.get("spec")
                    source_specs = source_node.meta.get("spec")
                    idx = node.args[1]
                    if (
                        isinstance(spec, TensorSpec)
                        and isinstance(source_specs, (tuple, list))
                        and isinstance(idx, int)
                        and 0 <= idx < len(source_specs)
                    ):
                        source_spec = source_specs[idx]
                        if isinstance(source_spec, TensorSpec):
                            _set_device_on_spec(
                                spec,
                                source_spec.device,
                                source_spec.device_index,
                            )
                            changed = True

        return PassResult(graph_module, changed)