Hi team, when I try to test torch.compile
using the following code on my MacBook Pro:
import torch
from torch import nn
class BackboneModel(nn.Module):
    """Small Conv2d/BatchNorm2d backbone used to exercise torch.compile.

    The three conv-bn pairs in ``forward`` are arranged to probe the
    ``efficient_conv_bn_eval`` fusion in eval mode: one fusable pair, one
    pair blocked by conv reuse, and one pair fusable only on the first
    use of the bn module.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.conv1 = nn.Conv2d(6, 6, 6)
        self.bn1 = nn.BatchNorm2d(6)
        self.conv2 = nn.Conv2d(6, 6, 6)
        self.bn2 = nn.BatchNorm2d(6)
        self.conv3 = nn.Conv2d(6, 6, 6)
        self.bn3 = nn.BatchNorm2d(6)

    def forward(self, x):
        # this conv-bn pair can use the efficient_conv_bn_eval feature
        x = self.bn1(self.conv1(x))
        # this conv-bn pair cannot use the efficient_conv_bn_eval feature
        # because `self.conv2` is used twice
        x = self.bn2(self.conv2(self.conv2(x)))
        # this conv-bn pair can use the efficient_conv_bn_eval feature
        # just for the first forward of `self.bn3`
        x = self.bn3(self.bn3(self.conv3(x)))
        return x
# Build the model and switch to eval mode so BatchNorm uses running stats
# (a precondition for the conv-bn eval fusion discussed above).
model = BackboneModel()
model.eval()
# NOTE(review): `input` shadows the Python builtin of the same name;
# consider renaming (e.g. `inp`) in the repro.
input = torch.randn(64, 6, 32, 32)
# torch.compile defers work: compilation is triggered by the first call
# below, which is where the reported omp.h / compiler failure surfaces.
opt_model = torch.compile(model)
output = opt_model(input)
I get a flood of error output, which appears to complain that omp.h
cannot be found.
Could we add a default fallback backend that simply returns the forward
function of the fx.GraphModule
when the chosen backend does not work?