-
Notifications
You must be signed in to change notification settings - Fork 26.3k
Closed
Labels
good first issue · module: logging (features which make it easier to tell what PyTorch is doing under the hood) · oncall: export · oncall: pt2
Description
🐛 Describe the bug
When run without TORCH_COMPILE_DEBUG=1, the following code works fine. When run with TORCH_COMPILE_DEBUG=1, it fails with the error: `TypeError: arg is neither SymInt/int nor torch.Tensor, None`.
repro:
# Minimal reproduction script.
# Defect fixed here: the pasted snippet had lost all indentation (the body of
# `forward` and the arguments to `aot_compile` were flush-left), so it was not
# runnable Python as shown. Formatting restored; behavior unchanged.
import os

# Must be set before `import torch` so the debug-artifact hooks are active
# when inductor runs.
os.environ["TORCH_COMPILE_DEBUG"] = "1"

import torch


class Model(torch.nn.Module):
    # NOTE(review): `ks0` is accepted but unused; presumably it is this
    # non-tensor second example input (the int 70) whose graph placeholder
    # ends up as `None` in the repro writer — see the TypeError raised in
    # torch/_dynamo/repro/after_aot.py in the quoted traceback. Confirm.
    def forward(self, x, ks0):
        return x.sum()


example_inputs = (
    torch.tensor([0, 3, 6], device="cuda", dtype=torch.int64),  # requires a CUDA device
    70,  # plain int passed alongside the tensor input
)

# Fails here with TypeError: "arg is neither SymInt/int nor torch.Tensor, None"
# only when TORCH_COMPILE_DEBUG=1 is set; succeeds without it.
_ = torch._export.aot_compile(
    Model(),
    example_inputs,
)
print("done")
error
/torch/_inductor/compile_fx.py:751, in fx_codegen_and_compile(gm, example_inputs, cudagraphs, static_input_idxs, is_backward, graph_id, cpp_wrapper, aot_mode, is_inference, user_visible_outputs, layout_opt, extern_node_serializer)
740 return fd.getvalue()
742 torch._logging.trace_structured(
743 "artifact",
744 metadata_fn=lambda: {
(...)
748 payload_fn=lambda: log_graph_runnable(),
749 )
--> 751 V.debug.fx_graph(gm, example_inputs)
752 # TODO: Should we actually dump this? It should be redundant with the aot
753 # structured logs...
754 # trace_structured("inductor_input_graph", payload_fn=lambda: gm.print_readable(print_output=False))
756 shape_env = shape_env_from_inputs(example_inputs)
/torch/_inductor/debug.py:474, in DebugFormatter.fx_graph(self, gm, inputs)
468 def fx_graph(
469 self,
470 gm: torch.fx.GraphModule,
471 inputs: List[torch.Tensor],
472 ) -> None:
473 with self.fopen("fx_graph_runnable.py") as fd:
--> 474 save_graph_repro(fd, gm, inputs, "inductor")
476 with self.fopen("fx_graph_readable.py") as fd:
477 fd.write(gm.print_readable(print_output=False))
/torch/_dynamo/repro/after_aot.py:279, in save_graph_repro(fd, gm, args, compiler_name, stable_output, save_dir, command, accuracy, tracing_mode, check_str)
274 fd.write(
275 "Repro is not generated due to existence of BackwardState in graph input"
276 )
277 return
278 fd.write(
--> 279 generate_compiler_repro_string(
280 gm,
281 args,
282 stable_output=stable_output,
283 save_dir=save_dir,
284 )
285 )
286 if accuracy is None:
287 accuracy = "_accuracy" in compiler_name
/torch/_dynamo/repro/after_aot.py:249, in generate_compiler_repro_string(gm, args, stable_output, save_dir)
247 writer.tensor(placeholder, arg)
248 else:
--> 249 raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
251 model_str += "\n".join(writer.lines()) + "\n"
253 model_str += "mod = Repro()\n"
TypeError: arg is neither SymInt/int nor torch.Tensor, None
Versions
trunk
cc @ezyang @chauhang @penguinwu @avikchaudhuri @gmagogsfm @zhxchen17 @tugsbayasgalan @angelayi @suo @ydwu4
Metadata
Metadata
Assignees
Labels
good first issue · module: logging (features which make it easier to tell what PyTorch is doing under the hood) · oncall: export · oncall: pt2