From d3686263fea8c2c625f0c4a3d46a2e28485701e1 Mon Sep 17 00:00:00 2001
From: Fabio Gomez
Date: Thu, 1 Jun 2023 10:44:16 -0500
Subject: [PATCH 1/5] Fix formatting in the FX Graph Mode Quantization guide (#2362)

* removed ### lines and numbering from headlines

* removed numbering from titles

* added blank lines to show code blocks

* removed the empty TODO placeholder

---------

Co-authored-by: Svetlana Karslioglu
---
 .../fx_graph_mode_quant_guide.rst | 27 +++++++++----------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/prototype_source/fx_graph_mode_quant_guide.rst b/prototype_source/fx_graph_mode_quant_guide.rst
index bb360861b9..9072e488a4 100644
--- a/prototype_source/fx_graph_mode_quant_guide.rst
+++ b/prototype_source/fx_graph_mode_quant_guide.rst
@@ -4,7 +4,7 @@
 **Author**: `Jerry Zhang `_
 
 FX Graph Mode Quantization requires a symbolically traceable model.
-We use the FX framework (TODO: link) to convert a symbolically traceable nn.Module instance to IR,
+We use the FX framework to convert a symbolically traceable nn.Module instance to IR,
 and we operate on the IR to execute the quantization passes.
 Please post your question about symbolically tracing your model in `PyTorch Discussion Forum `_
@@ -22,16 +22,19 @@ You can use any combination of these options:
 
 b. Write your own observed and quantized submodule
 
 
-####################################################################
 If the code that is not symbolically traceable does not need to be quantized,
 we have the following two options to run FX Graph Mode Quantization:
-1.a. Symbolically trace only the code that needs to be quantized
+
+
+Symbolically trace only the code that needs to be quantized
 -----------------------------------------------------------------
 When the whole model is not symbolically traceable but the submodule we want to
 quantize is symbolically traceable, we can run quantization only on that submodule.
+
 before:
 
 .. code:: python
+
     class M(nn.Module):
         def forward(self, x):
             x = non_traceable_code_1(x)
@@ -42,6 +45,7 @@ before:
 after:
 
 .. code:: python
+
     class FP32Traceable(nn.Module):
         def forward(self, x):
             x = traceable_code(x)
@@ -69,8 +73,7 @@
 Note that if the original model needs to be preserved, you will have to copy it yourself
 before calling the quantization APIs.
 
-#####################################################
-1.b. Skip symbolically trace the non-traceable code
+Skip symbolically tracing the non-traceable code
 ---------------------------------------------------
 When we have some non-traceable code in the module, and this part of the code
 doesn't need to be quantized, we can factor it out into a submodule and skip
 symbolically tracing that submodule.
@@ -134,8 +137,7 @@ quantization code:
 If the code that is not symbolically traceable needs to be quantized,
 we have the following two options:
 
-##########################################################
-2.a Refactor your code to make it symbolically traceable
+Refactor your code to make it symbolically traceable
 --------------------------------------------------------
 If it is easy to refactor the code and make it symbolically traceable,
 we can refactor it and remove the use of non-traceable constructs in Python.
@@ -167,15 +169,10 @@ after:
 
     return x.permute(0, 2, 1, 3)
 
 
-quantization code:
-
 This can be combined with other approaches, and the
 quantization code depends on the model.
-
-
-#######################################################
-2.b. Write your own observed and quantized submodule
+Write your own observed and quantized submodule
 -----------------------------------------------------
 
 If the non-traceable code can't be refactored to be symbolically traceable,
@@ -207,8 +204,8 @@ non-traceable logic, wrapped in a module
     class FP32NonTraceable:
     ...
 
-
-2. Define observed version of FP32NonTraceable
+2. Define observed version of
+FP32NonTraceable
 
 .. code:: python
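The quantization call that the first option above builds toward is not shown in these hunks. A minimal, self-contained sketch of how that submodule-only flow might look: the module layout mirrors the ``M``/``FP32Traceable`` example from the patch, while the ``fbgemm`` qconfig, the layer size, and the dummy input are placeholder assumptions for illustration, not part of the patch.

.. code:: python

    import copy

    import torch
    import torch.nn as nn
    from torch.ao.quantization import QConfigMapping, get_default_qconfig
    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx

    class FP32Traceable(nn.Module):
        """Stands in for the symbolically traceable part of the model."""
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(8, 8)  # placeholder layer

        def forward(self, x):
            return self.linear(x)

    class M(nn.Module):
        """Whole model; non_traceable_code_1/2 would surround the submodule call."""
        def __init__(self):
            super().__init__()
            self.traceable_submodule = FP32Traceable()

        def forward(self, x):
            return self.traceable_submodule(x)

    # copy first if the original FP32 model must be preserved,
    # since the quantization APIs modify the module in place
    model_fp32 = copy.deepcopy(M().eval())

    example_inputs = (torch.randn(1, 8),)
    qconfig_mapping = QConfigMapping().set_global(get_default_qconfig("fbgemm"))

    # symbolically trace and quantize only the submodule;
    # the non-traceable rest of M is left untouched
    model_fp32.traceable_submodule = prepare_fx(
        model_fp32.traceable_submodule, qconfig_mapping, example_inputs
    )
    model_fp32(*example_inputs)  # calibration pass(es)
    model_fp32.traceable_submodule = convert_fx(model_fp32.traceable_submodule)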
From c5501e78a19f7cae71cc91fb5a9ead1c283e9ee3 Mon Sep 17 00:00:00 2001
From: Mariia Mykhailova
Date: Thu, 1 Jun 2023 08:49:51 -0700
Subject: [PATCH 2/5] Redirect "Finetuning Torchvision Models" to "TorchVision Object Detection Finetuning Tutorial" (#2378)

---
 .../finetuning_torchvision_models_tutorial.rst | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 beginner_source/finetuning_torchvision_models_tutorial.rst

diff --git a/beginner_source/finetuning_torchvision_models_tutorial.rst b/beginner_source/finetuning_torchvision_models_tutorial.rst
new file mode 100644
index 0000000000..711f4b0f99
--- /dev/null
+++ b/beginner_source/finetuning_torchvision_models_tutorial.rst
@@ -0,0 +1,10 @@
+Finetuning Torchvision Models
+=============================
+
+This tutorial has been moved to https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
+
+It will redirect in 3 seconds.
+
+.. raw:: html
+
+   <meta http-equiv="refresh" content="3; url=https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html">

From 9633e5f141eefbe62e5dcb8168b9e34d505058d0 Mon Sep 17 00:00:00 2001
From: Sergii Dymchenko
Date: Thu, 1 Jun 2023 09:31:10 -0700
Subject: [PATCH 3/5] Fix docathon-label-sync.py to not fail on PRs without description (#2379)

See https://github.com/pytorch/tutorials/actions/runs/5140794478/jobs/9252588225?pr=2377
as an example
---
 .github/scripts/docathon-label-sync.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/scripts/docathon-label-sync.py b/.github/scripts/docathon-label-sync.py
index 597f4b5e03..5da80f24f5 100644
--- a/.github/scripts/docathon-label-sync.py
+++ b/.github/scripts/docathon-label-sync.py
@@ -14,6 +14,9 @@ def main():
     repo = g.get_repo(f'{repo_owner}/{repo_name}')
     pull_request = repo.get_pull(pull_request_number)
     pull_request_body = pull_request.body
+    # PR without description
+    if pull_request_body is None:
+        return
 
     # get issue number from the PR body
     if not re.search(r'#\d{1,5}', pull_request_body):

From d9fd5bae719632632f96865bc198dd266905bacc Mon Sep 17 00:00:00 2001
From: Qasim Khan
Date: Thu, 1 Jun 2023 21:39:27 +0500
Subject: [PATCH 4/5] Change batchify desc to remove ambiguity (#2383)

Co-authored-by: Carl Parker
---
 beginner_source/transformer_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/transformer_tutorial.py b/beginner_source/transformer_tutorial.py
index 57d1f8d859..cce52eefdb 100644
--- a/beginner_source/transformer_tutorial.py
+++ b/beginner_source/transformer_tutorial.py
@@ -149,7 +149,7 @@ def forward(self, x: Tensor) -> Tensor:
 # into ``batch_size`` columns. If the data does not divide evenly into
 # ``batch_size`` columns, then the data is trimmed to fit. For instance, with
 # the alphabet as the data (total length of 26) and ``batch_size=4``, we would
-# divide the alphabet into 4 sequences of length 6:
+# divide the alphabet into sequences of length 6, resulting in 4 such sequences.
 #
 # .. math::
 #   \begin{bmatrix}
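The comment that patch 4 rewords describes the tutorial's ``batchify`` helper. A small sketch of the behavior being documented, using integers as stand-ins for the letters (the real tutorial operates on token tensors and also moves the result to the target device):

.. code:: python

    import torch
    from torch import Tensor

    def batchify(data: Tensor, bsz: int) -> Tensor:
        # split a 1-D sequence into bsz columns, trimming elements that don't fit
        seq_len = data.size(0) // bsz        # 26 // 4 = 6 for the alphabet
        data = data[: seq_len * bsz]         # trim 26 elements down to 24
        return data.view(bsz, seq_len).t().contiguous()

    alphabet = torch.arange(26)              # stand-in for the letters a..z
    batches = batchify(alphabet, bsz=4)
    print(batches.shape)                     # torch.Size([6, 4]): 4 sequences of length 6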
From 4cd44ae2dd4cfdd5f923302d4e6af234b5af0ece Mon Sep 17 00:00:00 2001
From: Kiersten Stokes
Date: Thu, 1 Jun 2023 12:19:25 -0500
Subject: [PATCH 5/5] Change formatting of code blocks for correct rendering in Colab (#2398)

---
 .../tensorboard_profiler_tutorial.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/intermediate_source/tensorboard_profiler_tutorial.py b/intermediate_source/tensorboard_profiler_tutorial.py
index 440f2257e1..2b241071b7 100644
--- a/intermediate_source/tensorboard_profiler_tutorial.py
+++ b/intermediate_source/tensorboard_profiler_tutorial.py
@@ -18,7 +18,7 @@
 -----
 To install ``torch`` and ``torchvision`` use the following command:
 
-::
+.. code-block::
 
    pip install torch torchvision
 
@@ -160,7 +160,7 @@ def train(data):
 #
 # Install PyTorch Profiler TensorBoard Plugin.
 #
-# ::
+# .. code-block::
 #
 #     pip install torch_tb_profiler
 #
@@ -168,7 +168,7 @@ def train(data):
 ######################################################################
 # Launch the TensorBoard.
 #
-# ::
+# .. code-block::
 #
 #     tensorboard --logdir=./log
 #
@@ -176,7 +176,7 @@ def train(data):
 ######################################################################
 # Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
 #
-# ::
+# .. code-block::
 #
 #     http://localhost:6006/#pytorch_profiler
 #
@@ -287,7 +287,7 @@ def train(data):
 # In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
 # pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
 #
-# ::
+# .. code-block::
 #
 #     train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
 #
@@ -316,7 +316,7 @@ def train(data):
 #
 # You can try it by using existing example on Azure
 #
-# ::
+# .. code-block::
 #
 #     pip install azure-storage-blob
 #     tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo_1_10
@@ -366,7 +366,7 @@ def train(data):
 #
 # You can try it by using existing example on Azure:
 #
-# ::
+# .. code-block::
 #
 #     pip install azure-storage-blob
 #     tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
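All of patch 5's hunks reformat snippets from one profiling workflow. For context, a condensed, runnable sketch of the loop that writes the ``./log`` traces read by the ``tensorboard --logdir=./log`` command above; the fake loader, shapes, and step counts are placeholders (the actual tutorial trains ``resnet18`` on CIFAR-10):

.. code:: python

    import torch
    import torchvision

    model = torchvision.models.resnet18()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    def train(data):
        inputs, labels = data
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # tiny fake dataset so the sketch runs anywhere
    loader = [(torch.randn(4, 3, 224, 224), torch.randint(0, 1000, (4,)))] * 6

    with torch.profiler.profile(
        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
        on_trace_ready=torch.profiler.tensorboard_trace_handler("./log/resnet18"),
        record_shapes=True,
        with_stack=True,
    ) as prof:
        for step, batch_data in enumerate(loader):
            if step >= (1 + 1 + 3):   # wait + warmup + active steps profiled
                break
            train(batch_data)
            prof.step()  # signal a step boundary to the profiler schedule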