Skip to content

Commit

Permalink
[AOTI] Turn on the ABI-compatible mode as default (pytorch#136534)
Browse files Browse the repository at this point in the history
Summary: Make AOTI generate ABI-compatible code by default for OSS.

Pull Request resolved: pytorch#136534
Approved by: https://github.com/chenyang78
ghstack dependencies: pytorch#137660
  • Loading branch information
desertfire authored and pytorchmergebot committed Oct 13, 2024
1 parent b181652 commit cfc5d18
Show file tree
Hide file tree
Showing 5 changed files with 16 additions and 10 deletions.
3 changes: 2 additions & 1 deletion .ci/pytorch/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -749,7 +749,8 @@ test_inductor_torchbench_cpu_smoketest_perf(){
fi
cat "$output_name"
# The threshold value needs to be actively maintained to make this check useful.
python benchmarks/dynamo/check_perf_csv.py -f "$output_name" -t "$speedup_target"
# Allow 1% variance for CPU perf to accommodate perf fluctuation
python benchmarks/dynamo/check_perf_csv.py -f "$output_name" -t "$speedup_target" -s 0.99
done

# Add a few ABI-compatible accuracy tests for CPU. These can be removed once we turn on ABI-compatible as default.
Expand Down
13 changes: 10 additions & 3 deletions benchmarks/dynamo/check_perf_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import pandas as pd


def check_perf_csv(filename, threshold):
def check_perf_csv(filename, threshold, threshold_scale):
"""
Basic performance checking.
"""
Expand All @@ -16,7 +16,7 @@ def check_perf_csv(filename, threshold):
for _, row in df.iterrows():
model_name = row["name"]
speedup = row["speedup"]
if speedup < threshold:
if speedup < threshold * threshold_scale:
failed.append(model_name)

print(f"{model_name:34} {speedup}")
Expand All @@ -39,5 +39,12 @@ def check_perf_csv(filename, threshold):
parser.add_argument(
"--threshold", "-t", type=float, help="threshold speedup value to check against"
)
parser.add_argument(
"--threshold-scale",
"-s",
type=float,
default=1.0,
    help="multiply the threshold by this value to relax the check",
)
args = parser.parse_args()
check_perf_csv(args.file, args.threshold)
check_perf_csv(args.file, args.threshold, args.threshold_scale)
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ resnet50,inductor,float32,dynamic,default,1.67742767
#timm_efficientnet,inductor,float32,static,cpp,
mobilenet_v3_large,inductor,float32,static,cpp,2.63311706
timm_resnest,inductor,float32,dynamic,cpp,1.7321529
functorch_maml_omniglot,inductor,float32,dynamic,cpp,1.17617472
functorch_maml_omniglot,inductor,float32,dynamic,cpp,1.126799
#hf_GPT2,inductor,float32,dynamic,cpp,
yolov3,export-aot-inductor,float32,static,default,1.40687424
mobilenet_v2,export-aot-inductor,float32,static,default,2.90375357
Expand Down
4 changes: 1 addition & 3 deletions torch/_inductor/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,9 +79,7 @@ def autotune_remote_cache_default() -> Optional[bool]:
cpp_wrapper = os.environ.get("TORCHINDUCTOR_CPP_WRAPPER", "0") == "1"

# codegen cpp wrapper code in an ABI compatible mode
abi_compatible = (
os.environ.get("TORCHINDUCTOR_ABI_COMPATIBLE", "1" if is_fbcode() else "0") == "1"
)
abi_compatible = os.environ.get("TORCHINDUCTOR_ABI_COMPATIBLE", "1") == "1"

c_shim_version = os.environ.get("TORCHINDUCTOR_C_SHIM_VERSION", "2")

Expand Down
4 changes: 2 additions & 2 deletions torch/_inductor/mkldnn_ir.py
Original file line number Diff line number Diff line change
Expand Up @@ -1266,7 +1266,7 @@ def create(cls, x, w, B, attr, scalars, algorithm):
constant_args.insert(0, None)

packed = LinearUnary(
layout=FlexibleLayout(
layout=FixedLayout(
device=x.get_device(),
dtype=x.get_dtype(),
size=output_size,
Expand Down Expand Up @@ -1347,7 +1347,7 @@ def create(cls, x, y, w, B, attr):
constant_args.insert(0, B)

packed = LinearBinary(
layout=FlexibleLayout(
layout=FixedLayout(
device=x.get_device(),
dtype=x.get_dtype(),
size=output_size,
Expand Down

0 comments on commit cfc5d18

Please sign in to comment.