diff --git a/.ci/docker/Dockerfile b/.ci/docker/Dockerfile
new file mode 100644
index 00000000000..8aefbfe8f47
--- /dev/null
+++ b/.ci/docker/Dockerfile
@@ -0,0 +1,25 @@
+ARG BASE_IMAGE
+FROM ${BASE_IMAGE}
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install common dependencies (so that this step can be cached separately)
+COPY ./common/install_base.sh install_base.sh
+RUN bash ./install_base.sh && rm install_base.sh
+
+# Setup user
+# TODO: figure out how to remove this part
+COPY ./common/install_user.sh install_user.sh
+RUN bash ./install_user.sh && rm install_user.sh
+
+COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
+RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
+
+COPY ./common/install_pip_requirements.sh install_pip_requirements.sh
+COPY ./requirements.txt requirements.txt
+RUN bash ./install_pip_requirements.sh && rm install_pip_requirements.sh
+
+RUN ln -s /usr/bin/python3 /usr/bin/python
+
+USER ci-user
+CMD ["bash"]
diff --git a/.ci/docker/build.sh b/.ci/docker/build.sh
new file mode 100755
index 00000000000..f40c45fea3d
--- /dev/null
+++ b/.ci/docker/build.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -exu
+
+IMAGE_NAME="$1"
+shift
+
+export UBUNTU_VERSION="22.04"
+export CUDA_VERSION="12.6.3"
+
+export BASE_IMAGE="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
+echo "Building ${IMAGE_NAME} Docker image"
+
+docker build \
+ --no-cache \
+ --progress=plain \
+ -f Dockerfile \
+ --build-arg BASE_IMAGE="${BASE_IMAGE}" \
+ "$@" \
+ .
diff --git a/.ci/docker/common/install_base.sh b/.ci/docker/common/install_base.sh
new file mode 100644
index 00000000000..3100b550a89
--- /dev/null
+++ b/.ci/docker/common/install_base.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# Based off of https://github.com/pytorch/pytorch/tree/b52e0bf131a4e55cd987176f9c5a8d2ad6783b4f/.ci/docker
+
+set -ex
+
+install_ubuntu() {
+ # Install common dependencies
+ apt-get update
+ # TODO: Some of these may not be necessary
+ apt-get install -y --no-install-recommends \
+ build-essential \
+ ca-certificates \
+ cmake=3.22* \
+ curl \
+ git \
+ wget \
+ sudo \
+ vim \
+ jq \
+ vim \
+ unzip \
+ gdb \
+ rsync \
+ libssl-dev \
+ p7zip-full \
+ libglfw3 \
+ libglfw3-dev \
+ sox \
+ libsox-dev \
+ libsox-fmt-all \
+ python3-pip \
+ python3-dev
+
+ # Cleanup package manager
+ apt-get autoclean && apt-get clean
+ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+}
+
+# Install base packages depending on the base OS
+ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
+case "$ID" in
+ ubuntu)
+ install_ubuntu
+ ;;
+ *)
+ echo "Unable to determine OS..."
+ exit 1
+ ;;
+esac
diff --git a/.ci/docker/common/install_docs_reqs.sh b/.ci/docker/common/install_docs_reqs.sh
new file mode 100644
index 00000000000..541c9976ad1
--- /dev/null
+++ b/.ci/docker/common/install_docs_reqs.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Based off of https://github.com/pytorch/pytorch/tree/b52e0bf131a4e55cd987176f9c5a8d2ad6783b4f/.ci/docker
+set -ex
+
+apt-get update
+apt-get install -y gpg-agent
+
+curl --retry 3 -sL https://deb.nodesource.com/setup_20.x | sudo -E bash -
+sudo apt-get install -y nodejs
+
+curl --retry 3 -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
+echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
+
+apt-get update
+apt-get install -y --no-install-recommends yarn
+yarn global add katex --prefix /usr/local
+
+sudo apt-get -y install doxygen
+
+apt-get autoclean && apt-get clean
+rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
diff --git a/.ci/docker/common/install_pip_requirements.sh b/.ci/docker/common/install_pip_requirements.sh
new file mode 100644
index 00000000000..a548d200462
--- /dev/null
+++ b/.ci/docker/common/install_pip_requirements.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -ex
+
+# Install pip packages
+pip install --upgrade pip
+pip install -r ./requirements.txt
diff --git a/.ci/docker/common/install_user.sh b/.ci/docker/common/install_user.sh
new file mode 100644
index 00000000000..6deb62086bc
--- /dev/null
+++ b/.ci/docker/common/install_user.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Copied from https://github.com/pytorch/executorch/blob/6e431355a554e5f84c3a05dfa2b981ead90c2b48/.ci/docker/common/install_user.sh#L1
+
+set -ex
+
+# Same as ec2-user
+echo "ci-user:x:1000:1000::/var/lib/ci-user:" >> /etc/passwd
+echo "ci-user:x:1000:" >> /etc/group
+# Needed on Focal or newer
+echo "ci-user:*:19110:0:99999:7:::" >> /etc/shadow
+
+# Create $HOME
+mkdir -p /var/lib/ci-user
+chown ci-user:ci-user /var/lib/ci-user
+
+# Allow sudo
+echo 'ci-user ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/ci-user
+
+# Test that sudo works
+sudo -u ci-user sudo -v
diff --git a/.ci/docker/requirements.txt b/.ci/docker/requirements.txt
new file mode 100644
index 00000000000..964f9aad4ab
--- /dev/null
+++ b/.ci/docker/requirements.txt
@@ -0,0 +1,78 @@
+# --extra-index-url https://download.pytorch.org/whl/cu117/index.html # Use this to run/publish tutorials against the latest binaries during the RC stage. Comment out after the release. Each release verify the correct cuda version.
+# Refer to ./jenkins/build.sh for tutorial build instructions.
+
+# Sphinx dependencies
+sphinx==7.2.6
+sphinx-gallery==0.19.0
+sphinx-reredirects==0.1.4
+sphinx_design==0.6.1
+docutils>=0.18.1,<0.21
+sphinx-copybutton==0.5.2
+sphinx_sitemap==2.7.1
+sphinxcontrib-mermaid==1.0.0
+sphinxcontrib.katex==0.9.10
+pypandoc==1.15
+pandocfilters==1.5.1
+markdown==3.8.2
+
+# PyTorch Theme
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@c2e38b37f3c432c610639f06d1d421c6df4c225c#egg=pytorch_sphinx_theme2
+
+# Tutorial dependencies
+tqdm==4.66.1
+numpy==1.24.4
+matplotlib
+librosa
+torch==2.8
+torchvision
+torchdata
+networkx
+PyHamcrest
+bs4
+awscliv2==2.1.1
+flask
+spacy==3.4.1
+ray[tune]==2.7.2
+tensorboard
+jinja2==3.1.3
+pytorch-lightning
+torchx
+torchrl==0.9.2
+tensordict==0.9.1
+# For ax_multiobjective_nas_tutorial.py
+ax-platform>=0.4.0,<0.5.0
+nbformat>=5.9.2
+datasets
+transformers
+onnx
+onnxscript>=0.2.2
+onnxruntime
+evaluate
+accelerate>=0.20.1
+
+importlib-metadata==6.8.0
+
+ipython
+
+sphinxcontrib.katex
+# to run examples
+boto3
+pandas
+requests
+scikit-image
+scipy==1.11.1
+numba==0.57.1
+pillow==10.2.0
+wget
+gym==0.26.2
+gym-super-mario-bros==7.4.0
+pyopengl
+gymnasium[mujoco]==0.27.0
+timm
+pygame==2.6.0
+pycocotools
+semilearn==0.3.2
+torchao==0.10.0
+segment_anything==1.0
+torchrec==1.2.0; platform_system == "Linux"
+fbgemm-gpu==1.2.0; platform_system == "Linux"
diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index a84b1eef2e8..00000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,286 +0,0 @@
-install_official_git_client: &install_official_git_client
- name: Install Official Git Client
- no_output_timeout: "1h"
- command: |
- set -e
- sudo apt-get -qq update
- sudo apt-get -qq install openssh-client git
-
-# This system setup script is meant to run before the CI-related scripts, e.g.,
-# installing Git client, checking out code, setting up CI env, and
-# building/testing.
-setup_linux_system_environment: &setup_linux_system_environment
- name: Set Up System Environment
- no_output_timeout: "1h"
- command: |
- set -ex
-
- # Set up CircleCI GPG keys for apt, if needed
- curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
-
- # Stop background apt updates. Hypothetically, the kill should not
- # be necessary, because stop is supposed to send a kill signal to
- # the process, but we've added it for good luck. Also
- # hypothetically, it's supposed to be unnecessary to wait for
- # the process to block. We also have that line for good luck.
- # If you like, try deleting them and seeing if it works.
- sudo systemctl stop apt-daily.service || true
- sudo systemctl kill --kill-who=all apt-daily.service || true
-
- sudo systemctl stop unattended-upgrades.service || true
- sudo systemctl kill --kill-who=all unattended-upgrades.service || true
-
- # wait until `apt-get update` has been killed
- while systemctl is-active --quiet apt-daily.service
- do
- sleep 1;
- done
- while systemctl is-active --quiet unattended-upgrades.service
- do
- sleep 1;
- done
-
- # See if we actually were successful
- systemctl list-units --all | cat
-
- sudo apt-get purge -y unattended-upgrades
-
- cat /etc/apt/sources.list
-
- ps auxfww | grep [a]pt
- ps auxfww | grep dpkg
-
-pytorch_tutorial_build_defaults: &pytorch_tutorial_build_defaults
- machine:
- image: ubuntu-1604:201903-01
- steps:
- - checkout
- - run:
- <<: *setup_linux_system_environment
- - run:
- name: Set Up CI Environment
- no_output_timeout: "1h"
- command: |
- set -e
-
- # Set up NVIDIA docker repo
- curl -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
- echo "deb https://nvidia.github.io/libnvidia-container/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
- echo "deb https://nvidia.github.io/nvidia-container-runtime/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
- echo "deb https://nvidia.github.io/nvidia-docker/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
-
- sudo apt-get -y update
- sudo apt-get -y remove linux-image-generic linux-headers-generic linux-generic docker-ce
- # WARNING: Docker version is hardcoded here; you must update the
- # version number below for docker-ce and nvidia-docker2 to get newer
- # versions of Docker. We hardcode these numbers because we kept
- # getting broken CI when Docker would update their docker version,
- # and nvidia-docker2 would be out of date for a day until they
- # released a newer version of their package.
- #
- # How to figure out what the correct versions of these packages are?
- # My preferred method is to start a Docker instance of the correct
- # Ubuntu version (e.g., docker run -it ubuntu:16.04) and then ask
- # apt what the packages you need are. Note that the CircleCI image
- # comes with Docker.
- sudo apt-get -y install \
- linux-headers-$(uname -r) \
- linux-image-generic \
- moreutils \
- docker-ce=5:18.09.4~3-0~ubuntu-xenial \
- nvidia-container-runtime=2.0.0+docker18.09.4-1 \
- nvidia-docker2=2.0.3+docker18.09.4-1 \
- expect-dev
-
- sudo pkill -SIGHUP dockerd
-
- sudo pip -q install awscli==1.16.35
-
- if [ -n "${CUDA_VERSION}" ]; then
- DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
- wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
- sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
- nvidia-smi
- fi
-
- # This IAM user only allows read-write access to ECR
- export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_ONLY}
- export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_ONLY}
- eval $(aws ecr get-login --region us-east-1 --no-include-email)
- - run:
- name: Build
- no_output_timeout: "20h"
- command: |
- set -e
-
- export pyTorchDockerImageTag=291
- echo "PyTorchDockerImageTag: "${pyTorchDockerImageTag}
-
- cat >/home/circleci/project/ci_build_script.sh </dev/null
- if [ -n "${CUDA_VERSION}" ]; then
- export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
- else
- export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
- fi
-
- echo "declare -x JOB_BASE_NAME=${CIRCLE_JOB}" > /home/circleci/project/env
- echo "declare -x COMMIT_ID=${CIRCLE_SHA1}" >> /home/circleci/project/env
- echo "declare -x COMMIT_SOURCE=${CIRCLE_BRANCH}" >> /home/circleci/project/env
- # DANGER! DO NOT REMOVE THE `set +x` SETTING HERE!
- set +x
- if [[ "$CIRCLE_BRANCH" == master ]]; then
- if [ -z "${CIRCLECI_AWS_ACCESS_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" ]; then exit 1; fi
- if [ -z "${CIRCLECI_AWS_SECRET_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" ]; then exit 1; fi
- if [ -z "${GITHUB_PYTORCHBOT_USERNAME}" ]; then exit 1; fi
- if [ -z "${GITHUB_PYTORCHBOT_TOKEN}" ]; then exit 1; fi
-
- echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" >> /home/circleci/project/env
- echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PYTORCH_TUTORIAL_BUILD_MASTER_S3_BUCKET}" >> /home/circleci/project/env
- echo "declare -x GITHUB_PYTORCHBOT_USERNAME=${GITHUB_PYTORCHBOT_USERNAME}" >> /home/circleci/project/env
- echo "declare -x GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" >> /home/circleci/project/env
- else
- echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PYTORCH_TUTORIAL_BUILD_PR_S3_BUCKET}" >> /home/circleci/project/env
- echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PYTORCH_TUTORIAL_BUILD_PR_S3_BUCKET}" >> /home/circleci/project/env
- fi
- set -x
-
- docker cp /home/circleci/project/. "$id:/var/lib/jenkins/workspace"
-
- export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_build_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
- echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
-
-pytorch_tutorial_build_worker_defaults: &pytorch_tutorial_build_worker_defaults
- environment:
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3"
- CUDA_VERSION: "9"
- resource_class: gpu.medium
- <<: *pytorch_tutorial_build_defaults
-
-pytorch_tutorial_build_manager_defaults: &pytorch_tutorial_build_manager_defaults
- environment:
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3"
- resource_class: medium
- <<: *pytorch_tutorial_build_defaults
-
-version: 2
-jobs:
- pytorch_tutorial_build_worker_0:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_1:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_2:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_3:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_4:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_5:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_6:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_7:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_8:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_9:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_10:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_11:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_12:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_13:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_14:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_15:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_16:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_17:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_18:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_worker_19:
- <<: *pytorch_tutorial_build_worker_defaults
-
- pytorch_tutorial_build_manager:
- <<: *pytorch_tutorial_build_manager_defaults
-
-workflows:
- version: 2
- build:
- jobs:
- - pytorch_tutorial_build_worker_0:
- context: org-member
- - pytorch_tutorial_build_worker_1:
- context: org-member
- - pytorch_tutorial_build_worker_2:
- context: org-member
- - pytorch_tutorial_build_worker_3:
- context: org-member
- - pytorch_tutorial_build_worker_4:
- context: org-member
- - pytorch_tutorial_build_worker_5:
- context: org-member
- - pytorch_tutorial_build_worker_6:
- context: org-member
- - pytorch_tutorial_build_worker_7:
- context: org-member
- - pytorch_tutorial_build_worker_8:
- context: org-member
- - pytorch_tutorial_build_worker_9:
- context: org-member
- - pytorch_tutorial_build_worker_10:
- context: org-member
- - pytorch_tutorial_build_worker_11:
- context: org-member
- - pytorch_tutorial_build_worker_12:
- context: org-member
- - pytorch_tutorial_build_worker_13:
- context: org-member
- - pytorch_tutorial_build_worker_14:
- context: org-member
- - pytorch_tutorial_build_worker_15:
- context: org-member
- - pytorch_tutorial_build_worker_16:
- context: org-member
- - pytorch_tutorial_build_worker_17:
- context: org-member
- - pytorch_tutorial_build_worker_18:
- context: org-member
- - pytorch_tutorial_build_worker_19:
- context: org-member
- - pytorch_tutorial_build_manager:
- context: org-member
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 00000000000..4928e536acf
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,8 @@
+FROM mcr.microsoft.com/vscode/devcontainers/python:3.8
+
+COPY requirements.txt /tmp/pip-tmp/
+
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+ && apt-get install git gcc unzip make -y \
+ && pip3 install --disable-pip-version-check --no-cache-dir -r /tmp/pip-tmp/requirements.txt \
+ && rm -rf /tmp/pip-tmp
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000000..86fe20483c5
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,18 @@
+{
+ "name": "PyTorch Tutorials",
+ "build": {
+ "context": "..",
+ "dockerfile": "Dockerfile",
+ "args": {}
+ },
+ "settings": {
+ "terminal.integrated.shell.linux": "/bin/bash",
+ "workbench.startupEditor": "none",
+ "files.autoSave": "afterDelay",
+ "python.dataScience.enabled": true,
+ "python.dataScience.alwaysTrustNotebooks": true,
+ "python.insidersChannel": "weekly",
+ "python.showStartPage": false
+ },
+ "extensions": ["ms-python.python", "lextudio.restructuredtext"]
+}
diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt
new file mode 100644
index 00000000000..2be1df895be
--- /dev/null
+++ b/.devcontainer/requirements.txt
@@ -0,0 +1,31 @@
+# Refer to ./jenkins/build.sh for tutorial build instructions
+
+sphinx==1.8.2
+sphinx-gallery==0.3.1
+tqdm
+numpy
+matplotlib
+torch
+torchvision
+torchtext
+torchaudio
+PyHamcrest
+bs4
+awscli==1.16.35
+flask
+spacy
+ray[tune]
+
+# PyTorch Theme
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
+
+ipython
+
+# to run examples
+pandas
+scikit-image
+pillow==10.3.0
+wget
+
+# for codespaces env
+pylint
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
new file mode 100644
index 00000000000..937417f4999
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -0,0 +1,60 @@
+name: 🐛 Bug Report
+description: Create a tutorial bug report
+title: "[BUG] - "
+labels: [
+ "bug"
+]
+
+body:
+- type: markdown
+ attributes:
+ value: >
+ #### Before submitting a bug, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/pytorch/tutorials/issues?q=is%3Aissue+sort%3Acreated-desc+).
+- type: textarea
+ attributes:
+ label: Add Link
+ description: |
+      **Add the link to the tutorial**
+ placeholder: |
+ Link to the tutorial on the website:
+ validations:
+ required: true
+- type: textarea
+ attributes:
+ label: Describe the bug
+ description: |
+ **Add the bug description**
+ placeholder: |
+ Provide a detailed description of the issue with code samples if relevant
+ ```python
+
+ # Sample code to reproduce the problem if relevant
+ ```
+
+ **Expected Result:** (Describe what you were expecting to see)
+
+
+ **Actual Result:** (Describe the result)
+
+ ```
+ The error message you got, with the full traceback.
+ ```
+
+ validations:
+ required: true
+- type: textarea
+ attributes:
+ label: Describe your environment
+ description: |
+ **Describe the environment you encountered the bug in:**
+ placeholder: |
+ * Platform (i.e macOS, Linux, Google Colab):
+ * CUDA (yes/no, version?):
+ * PyTorch version (run `python -c "import torch; print(torch.__version__)"`):
+
+ validations:
+ required: true
+- type: markdown
+ attributes:
+ value: >
+ Thanks for contributing 🎉!
diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml
new file mode 100644
index 00000000000..c1c449c29fe
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.yml
@@ -0,0 +1,37 @@
+name: 🚀 Feature request
+description: Submit a proposal for a new PyTorch tutorial or improvement of an existing tutorial
+title: "💡 [REQUEST] - "
+labels: [
+ "feature"
+]
+
+body:
+- type: textarea
+ attributes:
+ label: 🚀 Describe the improvement or the new tutorial
+ description: |
+ **Describe the improvement**
+ placeholder: |
+ Explain why this improvement or new tutorial is important. For example, *"This tutorial will help users to better understand feature X of PyTorch."* If there is a tutorial that you propose to replace, add here. If this is related to another GitHub issue, add a link here.
+ validations:
+ required: true
+- type: textarea
+ attributes:
+ label: Existing tutorials on this topic
+ description: |
+ **Add a list of existing tutorials on the same topic.**
+ placeholder: |
+      List tutorials that already explain this functionality, if they exist, on pytorch.org or elsewhere.
+ * Link
+ * Link
+- type: textarea
+ attributes:
+ label: Additional context
+ description: |
+ **Add additional context**
+ placeholder: |
+ Add any other context or screenshots about the feature request.
+- type: markdown
+ attributes:
+ value: >
+ Thanks for contributing 🎉!
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000000..8c3604b99fb
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,11 @@
+Fixes #ISSUE_NUMBER
+
+## Description
+
+
+## Checklist
+
+- [ ] The issue that is being fixed is referred in the description (see above "Fixes #ISSUE_NUMBER")
+- [ ] Only one issue is addressed in this pull request
+- [ ] Labels from the issue that this PR is fixing are added to this pull request
+- [ ] No unnecessary issues are included into this pull request.
diff --git a/.github/pytorch-probot.yml b/.github/pytorch-probot.yml
new file mode 100644
index 00000000000..6d0e8803efb
--- /dev/null
+++ b/.github/pytorch-probot.yml
@@ -0,0 +1 @@
+tracking_issue: 1896
diff --git a/.github/scripts/check_redirects.sh b/.github/scripts/check_redirects.sh
new file mode 100755
index 00000000000..6aa31819820
--- /dev/null
+++ b/.github/scripts/check_redirects.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+if [ "$CURRENT_BRANCH" == "$BASE_BRANCH" ]; then
+ echo "Running on $BASE_BRANCH branch. Skipping check."
+ exit 0
+fi
+
+
+# Get list of deleted or renamed files in this branch compared to base
+DELETED_FILES=$(git diff --name-status $BASE_BRANCH $CURRENT_BRANCH --diff-filter=DR | awk '{print $2}' | grep -E '\.(rst|py|md)$' | grep -v 'redirects.py')
+# Check if any deleted or renamed files were found
+if [ -z "$DELETED_FILES" ]; then
+ echo "No deleted or renamed files found. Skipping check."
+ exit 0
+fi
+
+echo "Deleted or renamed files:"
+echo "$DELETED_FILES"
+
+# Check if redirects.py has been updated
+REDIRECTS_UPDATED=$(git diff --name-status $BASE_BRANCH $CURRENT_BRANCH --diff-filter=AM | grep 'redirects.py' && echo "yes" || echo "no")
+
+if [ "$REDIRECTS_UPDATED" == "no" ]; then
+ echo "ERROR: Files were deleted or renamed but redirects.py was not updated. Please update .github/scripts/redirects.py to redirect these files."
+ exit 1
+fi
+
+# Check if each deleted file has a redirect entry
+MISSING_REDIRECTS=0
+for FILE in $DELETED_FILES; do
+ # Convert file path to URL path format (remove extension and adjust path)
+ REDIRECT_PATH=$(echo $FILE | sed -E 's/(.+)_source\/(.+)\.(py|rst|md)$/\1\/\2.html/')
+
+ # Check if this path exists in redirects.py as a key. We don't check for values.
+ if ! grep -q "\"$REDIRECT_PATH\":" redirects.py; then
+ echo "ERROR: Missing redirect for deleted file: $FILE (should have entry for \"$REDIRECT_PATH\")"
+ MISSING_REDIRECTS=1
+ fi
+done
+
+if [ $MISSING_REDIRECTS -eq 1 ]; then
+ echo "ERROR: Please add redirects for all deleted/renamed files to redirects.py"
+ exit 1
+fi
+
+echo "All deleted/renamed files have proper redirects. Check passed!"
diff --git a/.github/scripts/docathon-label-sync.py b/.github/scripts/docathon-label-sync.py
new file mode 100644
index 00000000000..7241e1370ce
--- /dev/null
+++ b/.github/scripts/docathon-label-sync.py
@@ -0,0 +1,46 @@
+import os
+from github import Github
+import sys
+import re
+
+def main():
+ token = os.environ.get('GITHUB_TOKEN')
+
+ repo_owner = "pytorch"
+ repo_name = "tutorials"
+ pull_request_number = int(sys.argv[1])
+
+ g = Github(token)
+ repo = g.get_repo(f'{repo_owner}/{repo_name}')
+ pull_request = repo.get_pull(pull_request_number)
+ pull_request_body = pull_request.body
+ # PR without description
+ if pull_request_body is None:
+ return
+
+ # get issue number from the PR body
+ if not re.search(r'#\d{1,5}', pull_request_body):
+ print("The pull request does not mention an issue.")
+ return
+ issue_number = int(re.findall(r'#(\d{1,5})', pull_request_body)[0])
+ issue = repo.get_issue(issue_number)
+ issue_labels = issue.labels
+ docathon_label_present = any(label.name == 'docathon-h1-2025' for label in issue_labels)
+
+ # if the issue has a docathon label, add all labels from the issue to the PR.
+ if not docathon_label_present:
+ print("The 'docathon-h1-2025' label is not present in the issue.")
+ return
+ pull_request_labels = pull_request.get_labels()
+ issue_label_names = [label.name for label in issue_labels]
+ labels_to_add = [label for label in issue_label_names if label not in pull_request_labels]
+ if not labels_to_add:
+ print("The pull request already has the same labels.")
+ return
+ pull_request.add_to_labels(*labels_to_add)
+ print("Labels added to the pull request!")
+
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/workflows/MonthlyLinkCheck.yml b/.github/workflows/MonthlyLinkCheck.yml
new file mode 100644
index 00000000000..aaddcda926f
--- /dev/null
+++ b/.github/workflows/MonthlyLinkCheck.yml
@@ -0,0 +1,44 @@
+#Runs once a month and checks links in the repo to ensure they are valid
+#If action fails, it creates an issue with the failing links and an "incorrect link" label
+#If link is valid but failing, it can be added to the .lycheeignore file
+#Action can also be run manually as needed.
+
+
+name: Monthly Link Check
+on:
+ schedule:
+ - cron: '0 0 1 * *' # Runs at midnight on the first day of every month
+ workflow_dispatch: # Allows manual triggering of the workflow
+jobs:
+ linkChecker:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 1
+ - name: Check Links
+ id: lychee
+ uses: lycheeverse/lychee-action@v2
+ with:
+ args: --accept=200,403,429 --base . --verbose --no-progress './**/*.md' './**/*.html' './**/*.rst'
+ token: ${{ secrets.CUSTOM_TOKEN }}
+ fail: true
+
+ - name: Create Issue From File
+ if: failure() && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
+ uses: peter-evans/create-issue-from-file@v5
+ with:
+ title: Broken links detected in docs 🔗
+ content-filepath: ./lychee/out.md
+ labels: 'incorrect link'
+ #token: ${{ secrets.CUSTOM_TOKEN }}
+
+
+ - name: Suggestions
+ if: failure()
+ run: |
+ echo -e "\nPlease review the links reported in the Check links step above."
+ echo -e "If a link is valid but fails due to a CAPTCHA challenge, IP blocking, login requirements, etc., consider adding such links to .lycheeignore file to bypass future checks.\n"
+ exit 1
diff --git a/.github/workflows/StalePRs.yml b/.github/workflows/StalePRs.yml
new file mode 100644
index 00000000000..e7393948518
--- /dev/null
+++ b/.github/workflows/StalePRs.yml
@@ -0,0 +1,156 @@
+# A workflow copied from the pytorch/pytorch repo stale PRs that implements similar logic to actions/stale.
+#
+# Compared to actions/stale, it is implemented to make API requests proportional
+# to the number of stale PRs, not the total number of issues in the repo. This
+# is because PyTorch has a lot of issues/PRs, so the actions/stale runs into
+# rate limits way too quickly.
+#
+# The behavior is:
+# - If a PR is not labeled stale, after 60 days of inactivity, label the PR as stale and comment about it.
+# - If a PR is labeled stale, after 30 days of inactivity, close the PR.
+# - `high priority` and `no-stale` PRs are exempt.
+
+name: Close stale pull requests
+
+on:
+ schedule:
+ # Run at midnight UTC.
+ - cron: '0 0 * * *'
+ workflow_dispatch:
+
+jobs:
+ stale:
+ if: ${{ github.repository == 'pytorch/tutorials' }}
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ pull-requests: write
+
+ steps:
+ - uses: actions/github-script@v6
+ with:
+ script: |
+ // Do some dumb retries on requests.
+ const retries = 7;
+ const baseBackoff = 100;
+ const sleep = timeout => new Promise(resolve => setTimeout(resolve, timeout));
+ github.hook.wrap('request', async (request, options) => {
+ for (let attempt = 1; attempt <= retries; attempt++) {
+ try {
+ return await request(options);
+ } catch (err) {
+ if (attempt < retries) {
+ core.warning(`Request getting retried. Attempt: ${attempt}`);
+ await sleep(baseBackoff * Math.pow(2, attempt));
+ continue;
+ }
+ throw err;
+ }
+ }
+ });
+
+ const MAX_API_REQUESTS = 100;
+
+          // If a PR is not labeled stale, label it stale after no update for 60 days.
+ const STALE_LABEL_THRESHOLD_MS = 1000 * 60 * 60 * 24 * 60;
+          // For PRs already labeled stale, close after no update for 30 days.
+ const STALE_CLOSE_THRESHOLD_MS = 1000 * 60 * 60 * 24 * 30;
+
+ const STALE_MESSAGE =
+ "Looks like this PR hasn't been updated in a while so we're going to go ahead and mark this as `stale`. " +
+ "Feel free to remove the `stale` label if you feel this was a mistake. " +
+ "If you are unable to remove the `stale` label please contact a maintainer in order to do so. " +
+ "If you want the bot to never mark this PR stale again, add the `no-stale` label. " +
+ "`stale` pull requests will automatically be closed after 30 days of inactivity. ";
+
+ let numAPIRequests = 0;
+ let numProcessed = 0;
+
+ async function processPull(pull) {
+ core.info(`[${pull.number}] URL: ${pull.html_url}`);
+ numProcessed += 1;
+ const labels = pull.labels.map((label) => label.name);
+
+ // Skip if certain labels are present.
+ if (labels.includes("no-stale") || labels.includes("high priority")) {
+ core.info(`[${pull.number}] Skipping because PR has an exempting label.`);
+ return false;
+ }
+
+ // Check if the PR is stale, according to our configured thresholds.
+ let staleThresholdMillis;
+ if (labels.includes("stale")) {
+ core.info(`[${pull.number}] PR is labeled stale, checking whether we should close it.`);
+ staleThresholdMillis = STALE_CLOSE_THRESHOLD_MS;
+ } else {
+ core.info(`[${pull.number}] Checking whether to label PR as stale.`);
+ staleThresholdMillis = STALE_LABEL_THRESHOLD_MS;
+ }
+
+ const millisSinceLastUpdated =
+ new Date().getTime() - new Date(pull.updated_at).getTime();
+
+ if (millisSinceLastUpdated < staleThresholdMillis) {
+ core.info(`[${pull.number}] Skipping because PR was updated recently`);
+ return false;
+ }
+
+ // At this point, we know we should do something.
+ // For PRs already labeled stale, close them.
+ if (labels.includes("stale")) {
+ core.info(`[${pull.number}] Closing PR.`);
+ numAPIRequests += 1;
+ await github.rest.issues.update({
+ owner: "pytorch",
+ repo: "tutorials",
+ issue_number: pull.number,
+ state: "closed",
+ });
+ } else {
+ // For PRs not labeled stale, label them stale.
+ core.info(`[${pull.number}] Labeling PR as stale.`);
+
+ numAPIRequests += 1;
+ await github.rest.issues.createComment({
+ owner: "pytorch",
+ repo: "tutorials",
+ issue_number: pull.number,
+ body: STALE_MESSAGE,
+ });
+
+ numAPIRequests += 1;
+ await github.rest.issues.addLabels({
+ owner: "pytorch",
+ repo: "tutorials",
+ issue_number: pull.number,
+ labels: ["stale"],
+ });
+ }
+ }
+
+ for await (const response of github.paginate.iterator(
+ github.rest.pulls.list,
+ {
+ owner: "pytorch",
+ repo: "tutorials",
+ state: "open",
+ sort: "created",
+ direction: "asc",
+ per_page: 100,
+ }
+ )) {
+ numAPIRequests += 1;
+ const pulls = response.data;
+ // Awaiting in a loop is intentional here. We want to serialize execution so
+      // that log groups are printed correctly
+ for (const pull of pulls) {
+ if (numAPIRequests > MAX_API_REQUESTS) {
+ core.warning("Max API requests exceeded, exiting.");
+ process.exit(0);
+ }
+ await core.group(`Processing PR #${pull.number}`, async () => {
+ await processPull(pull);
+ });
+ }
+ }
+ core.info(`Processed ${numProcessed} PRs total.`);
diff --git a/.github/workflows/build-tutorials.yml b/.github/workflows/build-tutorials.yml
new file mode 100644
index 00000000000..94cfd5843a0
--- /dev/null
+++ b/.github/workflows/build-tutorials.yml
@@ -0,0 +1,183 @@
+name: Build tutorials
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
+ cancel-in-progress: true
+
+jobs:
+ worker:
+ name: pytorch_tutorial_build_worker
+ strategy:
+ matrix:
+ include:
+ - { shard: 1, num_shards: 15, runner: "linux.g5.12xlarge.nvidia.gpu" }
+ - { shard: 2, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 3, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 4, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 5, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 6, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 7, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 8, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 9, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 10, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 11, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 12, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 13, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 14, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ - { shard: 15, num_shards: 15, runner: "linux.g5.4xlarge.nvidia.gpu" }
+ fail-fast: false
+ runs-on: ${{ matrix.runner }}
+ steps:
+ - name: Setup SSH (Click me for login details)
+ uses: pytorch/test-infra/.github/actions/setup-ssh@main
+ with:
+ github-secret: ${{ secrets.GITHUB_TOKEN }}
+ instructions: |
+ All testing is done inside the container, to start an interactive session run:
+ docker exec -it $(docker container ps --format '{{.ID}}') bash
+
+ - name: Checkout Tutorials
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Setup Linux
+ uses: pytorch/pytorch/.github/actions/setup-linux@main
+
+ - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
+ uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+
+ - name: Calculate/build docker image
+ id: calculate-docker-image
+ uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+ with:
+ docker-image-name: tutorials
+
+ - name: Pull docker image
+ uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+ with:
+ docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
+
+ - name: Build
+ shell: bash
+ env:
+ DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
+ NUM_WORKERS: ${{ matrix.num_shards }}
+ WORKER_ID: ${{ matrix.shard }}
+ COMMIT_ID: ${{ github.sha }}
+ JOB_TYPE: worker
+ COMMIT_SOURCE: ${{ github.ref }}
+ run: |
+ set -ex
+
+ chmod +x ".jenkins/build.sh"
+
+ container_name=$(docker run \
+ ${GPU_FLAG:-} \
+ -e WORKER_ID \
+ -e NUM_WORKERS \
+ -e COMMIT_ID \
+ -e JOB_TYPE \
+ -e COMMIT_SOURCE \
+ --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
+ --tty \
+ --detach \
+ --shm-size=2gb \
+ --name="${container_name}" \
+ -v "${GITHUB_WORKSPACE}:/var/lib/workspace" \
+ -w /var/lib/workspace \
+ "${DOCKER_IMAGE}"
+ )
+
+ docker exec -u ci-user -t "${container_name}" sh -c ".jenkins/build.sh"
+
+ - name: Teardown Linux
+ uses: pytorch/test-infra/.github/actions/teardown-linux@main
+ if: always()
+
+ manager:
+ name: pytorch_tutorial_build_manager
+ needs: worker
+ runs-on: [self-hosted, linux.2xlarge]
+ environment: ${{ github.ref == 'refs/heads/main' && 'pytorchbot-env' || '' }}
+ steps:
+ - name: Setup SSH (Click me for login details)
+ uses: pytorch/test-infra/.github/actions/setup-ssh@main
+ with:
+ github-secret: ${{ secrets.GITHUB_TOKEN }}
+ instructions: |
+ All testing is done inside the container, to start an interactive session run:
+ docker exec -it $(docker container ps --format '{{.ID}}') bash
+
+ - name: Checkout Tutorials
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Setup Linux
+ uses: pytorch/pytorch/.github/actions/setup-linux@main
+
+ - name: Calculate/build docker image
+ id: calculate-docker-image
+ uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+ with:
+ docker-image-name: tutorials
+
+ - name: Pull docker image
+ uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+ with:
+ docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
+
+ - name: Build
+ shell: bash
+ env:
+ DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
+ NUM_WORKERS: 15
+ WORKER_ID: ${{ matrix.shard }}
+ COMMIT_ID: ${{ github.sha }}
+ JOB_TYPE: manager
+ COMMIT_SOURCE: ${{ github.ref }}
+ GITHUB_PYTORCHBOT_TOKEN: ${{ secrets.PYTORCHBOT_TOKEN }}
+ run: |
+ set -ex
+
+ chmod +x ".jenkins/build.sh"
+
+ container_name=$(docker run \
+ ${GPU_FLAG:-} \
+ -e WORKER_ID \
+ -e NUM_WORKERS \
+ -e COMMIT_ID \
+ -e JOB_TYPE \
+ -e COMMIT_SOURCE \
+ -e GITHUB_PYTORCHBOT_TOKEN \
+ --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
+ --tty \
+ --detach \
+ --name="${container_name}" \
+ -v "${GITHUB_WORKSPACE}:/var/lib/workspace" \
+ -w /var/lib/workspace \
+ "${DOCKER_IMAGE}"
+ )
+
+ docker exec -u ci-user -t "${container_name}" sh -c ".jenkins/build.sh"
+
+ - name: Upload docs preview
+ uses: seemethere/upload-artifact-s3@v5
+ if: ${{ github.event_name == 'pull_request' }}
+ with:
+ retention-days: 14
+ s3-bucket: doc-previews
+ if-no-files-found: error
+ path: docs
+ s3-prefix: pytorch/tutorials/${{ github.event.pull_request.number }}
+
+ - name: Teardown Linux
+ uses: pytorch/test-infra/.github/actions/teardown-linux@main
+ if: always()
diff --git a/.github/workflows/check-redirects.yml b/.github/workflows/check-redirects.yml
new file mode 100644
index 00000000000..380e3989bf4
--- /dev/null
+++ b/.github/workflows/check-redirects.yml
@@ -0,0 +1,25 @@
+name: Check Redirects for Deleted or Renamed Files
+
+on:
+ pull_request:
+ paths:
+ - '*/**/*.rst'
+ - '*/**/*.py'
+ - '*/**/*.md'
+
+jobs:
+ check-redirects:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Run redirect check script
+ run: |
+ chmod +x ./.github/scripts/check_redirects.sh
+ ./.github/scripts/check_redirects.sh
+ env:
+ BASE_BRANCH: ${{ github.base_ref }}
+ CURRENT_BRANCH: ${{ github.head_ref }}
diff --git a/.github/workflows/docathon-assign.yml b/.github/workflows/docathon-assign.yml
new file mode 100644
index 00000000000..8eef2b2fc88
--- /dev/null
+++ b/.github/workflows/docathon-assign.yml
@@ -0,0 +1,60 @@
+name: Assign User on Comment
+
+on:
+ workflow_dispatch:
+ issue_comment:
+ types: [created]
+
+jobs:
+ assign:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ steps:
+ - name: Check for "/assigntome" in comment
+ uses: actions/github-script@v6
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ script: |
+ const issueComment = context.payload.comment.body;
+ const assignRegex = /\/assigntome/i;
+ if (assignRegex.test(issueComment)) {
+ const assignee = context.payload.comment.user.login;
+ const issueNumber = context.payload.issue.number;
+ try {
+ const { data: issue } = await github.rest.issues.get({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issueNumber
+ });
+ const hasLabel = issue.labels.some(label => label.name === 'docathon-h1-2025');
+ if (hasLabel) {
+ if (issue.assignee !== null) {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issueNumber,
+ body: "The issue is already assigned. Please pick an opened and unnasigned issue with the [docathon-h1-2025 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h1-2025)."
+ });
+ } else {
+ await github.rest.issues.addAssignees({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issueNumber,
+ assignees: [assignee]
+ });
+ }
+ } else {
+ const commmentMessage = "This issue does not have the correct label. Please pick an opened and unnasigned issue with the [docathon-h1-2025 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h1-2025)."
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issueNumber,
+ body: commmentMessage
+ });
+ }
+ } catch (error) {
+ console.error(error);
+ }
+ }
diff --git a/.github/workflows/docathon-label-sync.yml b/.github/workflows/docathon-label-sync.yml
new file mode 100644
index 00000000000..1b33bebaac2
--- /dev/null
+++ b/.github/workflows/docathon-label-sync.yml
@@ -0,0 +1,29 @@
+name: Docathon Labels Sync
+
+on:
+ pull_request_target:
+ types: [opened, synchronize, edited]
+
+jobs:
+ check-labels:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - name: Check if PR mentions an issue and get labels
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 1
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.x
+ - name: Install dependencies
+ run: |
+ pip install requests
+ pip install PyGithub
+ - name: Run Python script
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: python ./.github/scripts/docathon-label-sync.py ${{ github.event.pull_request.number }}
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
new file mode 100644
index 00000000000..6d75d1fc929
--- /dev/null
+++ b/.github/workflows/docker-build.yml
@@ -0,0 +1,59 @@
+name: Docker Build
+
+on:
+ workflow_dispatch:
+ pull_request:
+ paths:
+ - .ci/docker/**
+ - .github/workflows/docker-builds.yml
+ push:
+ branches:
+ - main
+ paths:
+ - .ci/docker/**
+ - .github/workflows/docker-builds.yml
+
+jobs:
+ docker-build:
+ runs-on: [self-hosted, linux.2xlarge]
+ timeout-minutes: 240
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - docker-image-name: tutorials
+ env:
+ DOCKER_IMAGE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/tutorials/${{ matrix.docker-image-name }}
+ steps:
+ - name: Clean workspace
+ shell: bash
+ run: |
+ echo "${GITHUB_WORKSPACE}"
+ sudo rm -rf "${GITHUB_WORKSPACE}"
+ mkdir "${GITHUB_WORKSPACE}"
+
+ - name: Setup SSH (Click me for login details)
+ uses: pytorch/test-infra/.github/actions/setup-ssh@main
+ with:
+ github-secret: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Setup Linux
+ uses: pytorch/test-infra/.github/actions/setup-linux@main
+
+ - name: Build docker image
+ id: build-docker-image
+ uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+ with:
+ docker-image-name: ${{ matrix.docker-image-name }}
+ push: true
+
+ - name: Teardown Linux
+ uses: pytorch/test-infra/.github/actions/teardown-linux@main
+ if: always()
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
+ cancel-in-progress: true
diff --git a/.github/workflows/link_checkPR.yml b/.github/workflows/link_checkPR.yml
new file mode 100644
index 00000000000..830e470c1c0
--- /dev/null
+++ b/.github/workflows/link_checkPR.yml
@@ -0,0 +1,57 @@
+#Checks links in a PR to ensure they are valid. If link is valid but failing, it can be added to the .lycheeignore file
+#Use the skip-link-check label on a PR to skip checking links on a PR
+
+name: link check on PR
+
+on:
+ pull_request:
+ branches: [main]
+
+jobs:
+ linkChecker:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 1
+
+ - name: Get Changed Files
+ id: changed-files
+ uses: tj-actions/changed-files@d6e91a2266cdb9d62096cebf1e8546899c6aa18f # v45.0.6
+
+ - name: Check for Skip Label
+ id: skip-label
+ uses: actions/github-script@v6
+ with:
+ script: |
+ const labels = await github.rest.issues.listLabelsOnIssue({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number
+ });
+ return labels.data.some(label => label.name === 'skip-link-check');
+
+ - name: Check Links
+ if: steps.skip-label.outputs.result == 'false'
+ uses: lycheeverse/lychee-action@v1
+ with:
+ args: --accept=200,403,429 --base . --verbose --no-progress ${{ steps.changed-files.outputs.all_changed_files }}
+ token: ${{ secrets.CUSTOM_TOKEN }}
+ fail: true
+
+ - name: Skip Message
+ if: steps.skip-label.outputs.result == 'true'
+ run: echo "Link check was skipped due to the presence of the 'skip-link-check' label."
+
+ # Per tj-actions, a deleted file is not a changed file so this ensures lint checking does not occur on deleted files
+ - name: No Files to Check
+ if: steps.skip-label.outputs.result == 'false' && steps.changed-files.outputs.any_changed == 'true'
+ run: echo "No relevant files were changed in this PR that require link checking."
+
+ - name: Suggestions
+ if: failure()
+ run: |
+ echo -e "\nPlease review the links reported in the Check links step above."
+ echo -e "If a link is valid but fails due to a CAPTCHA challenge, IP blocking, login requirements, etc., consider adding such links to .lycheeignore file to bypass future checks.\n"
+ exit 1
diff --git a/.github/workflows/lintrunner.yml b/.github/workflows/lintrunner.yml
new file mode 100644
index 00000000000..e1a6889eb28
--- /dev/null
+++ b/.github/workflows/lintrunner.yml
@@ -0,0 +1,38 @@
+name: Lintrunner
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
+ cancel-in-progress: true
+
+jobs:
+ lintrunner:
+ name: lintrunner
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout tutorials
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - name: Setup Python
+ uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+ with:
+ python-version: '3.12'
+
+ - name: Install Lintrunner
+ run: |
+ pip install lintrunner==0.12.5
+ lintrunner init
+
+ - name: Run lintrunner on all files - Linux
+ run: |
+ set +e
+ if ! lintrunner -v --force-color --all-files --tee-json=lint.json; then
+ echo ""
+ echo -e "\e[1m\e[36mYou can reproduce these results locally by using \`lintrunner -m main\`.\e[0m"
+ exit 1
+ fi
diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml
new file mode 100644
index 00000000000..e1cba836c96
--- /dev/null
+++ b/.github/workflows/spelling.yml
@@ -0,0 +1,153 @@
+name: Check spelling
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+
+jobs:
+ pyspelling:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Check for skip label and get changed files
+ id: check-files
+ uses: actions/github-script@v6
+ with:
+ script: |
+ let skipCheck = false;
+ let changedFiles = [];
+
+ if (context.eventName === 'pull_request') {
+ // Check for skip label
+ const { data: labels } = await github.rest.issues.listLabelsOnIssue({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number
+ });
+ skipCheck = labels.some(label => label.name === 'skip-spell-check');
+
+ if (!skipCheck) {
+ // Get changed files in PR
+ const { data: files } = await github.rest.pulls.listFiles({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ pull_number: context.issue.number
+ });
+
+ changedFiles = files
+ .filter(file => file.filename.match(/\.(py|rst|md)$/))
+ .map(file => file.filename);
+ }
+ } else {
+ // For push events, we'll still need to use git diff
+ // We'll handle this after checkout
+ }
+
+ core.setOutput('skip', skipCheck.toString());
+ core.setOutput('files', changedFiles.join('\n'));
+ core.setOutput('is-pr', (context.eventName === 'pull_request').toString());
+
+ - uses: actions/checkout@v4
+ if: steps.check-files.outputs.skip != 'true'
+ with:
+ fetch-depth: 0
+
+ - name: Get changed files for push event
+ if: |
+ steps.check-files.outputs.skip != 'true' &&
+ steps.check-files.outputs.is-pr != 'true'
+ id: push-files
+ run: |
+ CHANGED_FILES=$(git diff --name-only HEAD^..HEAD -- '*.py' '*.rst' '*.md')
+ echo "files<> $GITHUB_OUTPUT
+ echo "$CHANGED_FILES" >> $GITHUB_OUTPUT
+ echo "EOF" >> $GITHUB_OUTPUT
+
+ - name: Check if relevant files changed
+ if: steps.check-files.outputs.skip != 'true'
+ id: check
+ run: |
+ if [ "${{ steps.check-files.outputs.is-pr }}" == "true" ]; then
+ FILES="${{ steps.check-files.outputs.files }}"
+ else
+ FILES="${{ steps.push-files.outputs.files }}"
+ fi
+
+ if [ -z "$FILES" ]; then
+ echo "skip=true" >> $GITHUB_OUTPUT
+ echo "No relevant files changed (*.py, *.rst, *.md), skipping spell check"
+ else
+ echo "skip=false" >> $GITHUB_OUTPUT
+ echo "Found changed files to check:"
+ echo "$FILES"
+ fi
+
+ - uses: actions/setup-python@v4
+ if: |
+ steps.check-files.outputs.skip != 'true' &&
+ steps.check.outputs.skip != 'true'
+ with:
+ python-version: '3.9'
+ cache: 'pip'
+
+ - name: Install dependencies
+ if: |
+ steps.check-files.outputs.skip != 'true' &&
+ steps.check.outputs.skip != 'true'
+ run: |
+ pip install pyspelling
+ sudo apt-get install aspell aspell-en
+
+ - name: Run spell check on each file
+ id: spellcheck
+ if: |
+ steps.check-files.outputs.skip != 'true' &&
+ steps.check.outputs.skip != 'true'
+ run: |
+ if [ "${{ steps.check-files.outputs.is-pr }}" == "true" ]; then
+ mapfile -t FILES <<< "${{ steps.check-files.outputs.files }}"
+ else
+ mapfile -t FILES <<< "${{ steps.push-files.outputs.files }}"
+ fi
+
+ # Check each file individually
+ FINAL_EXIT_CODE=0
+ SPELLCHECK_LOG=""
+ for file in "${FILES[@]}"; do
+ if [ -n "$file" ]; then
+ echo "Checking spelling in $file"
+ python3 -c "import yaml; config = yaml.safe_load(open('.pyspelling.yml')); new_matrix = [matrix.copy() for matrix in config['matrix'] if (('python' in matrix['name'].lower() and '$file'.endswith('.py')) or ('rest' in matrix['name'].lower() and '$file'.endswith('.rst')) or ('markdown' in matrix['name'].lower() and '$file'.endswith('.md'))) and not matrix.update({'sources': ['$file']})]; config['matrix'] = new_matrix; yaml.dump(config, open('temp_config.yml', 'w'))"
+
+ if OUTPUT=$(pyspelling -c temp_config.yml 2>&1); then
+ echo "No spelling errors found in $file"
+ else
+ FINAL_EXIT_CODE=1
+ echo "Spelling errors found in $file:"
+ echo "$OUTPUT"
+ SPELLCHECK_LOG+="### $file\n$OUTPUT\n\n"
+ fi
+ fi
+ done
+
+ # Save the results to GITHUB_OUTPUT
+ echo "spell_failed=$FINAL_EXIT_CODE" >> $GITHUB_OUTPUT
+ echo "spell_log<> $GITHUB_OUTPUT
+ echo "$SPELLCHECK_LOG" >> $GITHUB_OUTPUT
+ echo "SPELLEOF" >> $GITHUB_OUTPUT
+
+ if [ $FINAL_EXIT_CODE -ne 0 ]; then
+ echo "Spell check failed! See above for details."
+ echo
+ echo "Here are a few tips:"
+ echo "- All PyTorch API objects must be in double backticks or use an intersphinx directive."
+ echo " Example: ``torch.nn``, :func:"
+ echo "- Consult en-wordlist.txt for spellings of some of the words."
+ echo " You can add a word to en-wordlist.txt if:"
+ echo " 1) It's a common abbreviation, like RNN."
+ echo " 2) It's a word widely accepted in the industry."
+ echo "- Please do not add words like 'dtype', 'torch.nn.Transformer' to pass spellcheck."
+ echo " Instead wrap it in double backticks or use an intersphinx directive."
+ echo
+ exit 1
+ fi
diff --git a/.gitignore b/.gitignore
index 3fb316b581f..3f1f927ee33 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,17 +3,24 @@ beginner
intermediate
advanced
pytorch_basics
+/recipes
+prototype
+/unstable
+sg_execution_times.rst
#data things
_data/
-beginner_source/hymenoptera_data
-beginner_source/blitz/data
-beginner_source/faces
-beginner_source/hybrid_frontend/data
-intermediate_source/data/
advanced_source/images/
-*data.zip
-faces.zip
+advanced_source/data/
+beginner_source/.data/
+beginner_source/data/
+beginner_source/blitz/data/
+beginner_source/faces/
+beginner_source/hybrid_frontend/data/
+beginner_source/hymenoptera_data/
+intermediate_source/data/
+*.zip
+MNIST/
#builds
_build/
@@ -28,6 +35,7 @@ __pycache__/
*.so
# Distribution / packaging
+src/
.Python
env/
build/
@@ -94,7 +102,7 @@ target/
.python-version
# celery beat schedule file
-celerybeat-schedule
+celerybeat-schedule
# dotenv
.env
@@ -112,3 +120,15 @@ ENV/
.DS_Store
cleanup.sh
*.swp
+
+# PyTorch things
+*.pt
+
+# VSCode
+*.vscode
+
+# pyspelling
+dictionary.dic
+
+# linters
+/.lintbin
diff --git a/.gitmodules b/.gitmodules
index 3a3c564c8fa..e69de29bb2d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "src/pytorch-sphinx-theme"]
- path = src/pytorch-sphinx-theme
- url = https://github.com/pytorch/pytorch_sphinx_theme
diff --git a/.jenkins/build.sh b/.jenkins/build.sh
index eaed1d00057..0187f6ba84c 100755
--- a/.jenkins/build.sh
+++ b/.jenkins/build.sh
@@ -1,150 +1,135 @@
+#!/bin/bash
+
set -ex
-if [[ "$COMMIT_SOURCE" == master ]]; then
- export BUCKET_NAME=pytorch-tutorial-build-master
-else
- export BUCKET_NAME=pytorch-tutorial-build-pull-request
-fi
+export BUCKET_NAME=pytorch-tutorial-build-pull-request
+
+# set locale for click dependency in spacy
+export LC_ALL=C.UTF-8
+export LANG=C.UTF-8
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-sudo apt-get update
-sudo apt-get install -y --no-install-recommends unzip p7zip-full sox libsox-dev libsox-fmt-all rsync
+# Update root certificates by installing new libgnutls30
-export PATH=/opt/conda/bin:$PATH
-rm -rf src
-pip install -r $DIR/../requirements.txt
+# Install pandoc (does not install from pypi)
+sudo apt-get update
+sudo apt-get install -y pandoc
-export PATH=/opt/conda/bin:$PATH
-conda install -y sphinx==1.8.2 pandas
-# PyTorch Theme
-rm -rf src
-pip install -e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
-# pillow >= 4.2 will throw error when trying to write mode RGBA as JPEG,
-# this is a workaround to the issue.
-pip install sphinx-gallery tqdm matplotlib ipython pillow==4.1.1
+# NS: Path to python runtime should already be part of docker container
+# export PATH=/opt/conda/bin:$PATH
-# Install torchaudio from source
-git clone https://github.com/pytorch/audio --quiet
-pushd audio
-python setup.py install
-popd
+#Install PyTorch Nightly for test.
+# Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
+# Install 2.5 to merge all 2.4 PRs - uncomment to install nightly binaries (update the version as needed).
+# sudo pip uninstall -y fbgemm-gpu torchrec
+# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
+# sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
+# pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
+# Install two language tokenizers for Translation with TorchText tutorial
+python -m spacy download en_core_web_sm
+python -m spacy download de_core_news_sm
-aws configure set default.s3.multipart_threshold 5120MB
+awsv2 -i
+awsv2 configure set default.s3.multipart_threshold 5120MB
# Decide whether to parallelize tutorial builds, based on $JOB_BASE_NAME
-export NUM_WORKERS=20
-if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then
+if [[ "${JOB_TYPE}" == "worker" ]]; then
# Step 1: Remove runnable code from tutorials that are not supposed to be run
- python $DIR/remove_runnable_code.py beginner_source/aws_distributed_training_tutorial.py beginner_source/aws_distributed_training_tutorial.py
+ python $DIR/remove_runnable_code.py beginner_source/aws_distributed_training_tutorial.py beginner_source/aws_distributed_training_tutorial.py || true
+ # Temp remove for mnist download issue. (Re-enabled for 1.8.1)
+ # python $DIR/remove_runnable_code.py beginner_source/fgsm_tutorial.py beginner_source/fgsm_tutorial.py || true
+ # python $DIR/remove_runnable_code.py intermediate_source/spatial_transformer_tutorial.py intermediate_source/spatial_transformer_tutorial.py || true
+ # Temp remove for 1.10 release.
+ # python $DIR/remove_runnable_code.py advanced_source/neural_style_tutorial.py advanced_source/neural_style_tutorial.py || true
+
# TODO: Fix bugs in these tutorials to make them runnable again
- python $DIR/remove_runnable_code.py beginner_source/audio_classifier_tutorial.py beginner_source/audio_classifier_tutorial.py
+ # python $DIR/remove_runnable_code.py beginner_source/audio_classifier_tutorial.py beginner_source/audio_classifier_tutorial.py || true
+
+ # Remove runnable code from tensorboard_profiler_tutorial.py as it frequently crashes, see https://github.com/pytorch/pytorch/issues/74139
+ # python $DIR/remove_runnable_code.py intermediate_source/tensorboard_profiler_tutorial.py intermediate_source/tensorboard_profiler_tutorial.py || true
# Step 2: Keep certain tutorials based on file count, and remove runnable code in all other tutorials
# IMPORTANT NOTE: We assume that each tutorial has a UNIQUE filename.
- export WORKER_ID=$(echo "${JOB_BASE_NAME}" | tr -dc '0-9')
- count=0
- FILES_TO_RUN=()
- for filename in $(find beginner_source/ -name '*.py' -not -path '*/data/*'); do
- if [ $(($count % $NUM_WORKERS)) != $WORKER_ID ]; then
- echo "Removing runnable code from "$filename
- python $DIR/remove_runnable_code.py $filename $filename
- else
- echo "Keeping "$filename
- FILES_TO_RUN+=($(basename $filename .py))
- fi
- count=$((count+1))
- done
- for filename in $(find intermediate_source/ -name '*.py' -not -path '*/data/*'); do
- if [ $(($count % $NUM_WORKERS)) != $WORKER_ID ]; then
- echo "Removing runnable code from "$filename
- python $DIR/remove_runnable_code.py $filename $filename
- else
- echo "Keeping "$filename
- FILES_TO_RUN+=($(basename $filename .py))
- fi
- count=$((count+1))
- done
- for filename in $(find advanced_source/ -name '*.py' -not -path '*/data/*'); do
- if [ $(($count % $NUM_WORKERS)) != $WORKER_ID ]; then
- echo "Removing runnable code from "$filename
- python $DIR/remove_runnable_code.py $filename $filename
- else
- echo "Keeping "$filename
- FILES_TO_RUN+=($(basename $filename .py))
- fi
- count=$((count+1))
- done
- echo "FILES_TO_RUN: " ${FILES_TO_RUN[@]}
+ FILES_TO_RUN=$(python .jenkins/get_files_to_run.py)
+ echo "FILES_TO_RUN: " ${FILES_TO_RUN}
+ # Files to run must be accessible to subprocesses (at least to `download_data.py`)
+ export FILES_TO_RUN
- # Step 3: Run `make docs` to generate HTML files and static files for these tutorials
+ # Step 3: Run `make docs` to generate HTML files and static files for these tutorials
+ pip3 install -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
make docs
+ # Step 3.1: Run the post-processing script:
+ python .jenkins/post_process_notebooks.py
+
# Step 4: If any of the generated files are not related the tutorial files we want to run,
# then we remove them
- for filename in $(find docs/beginner docs/intermediate docs/advanced -name '*.html'); do
+ set +x
+ for filename in $(find docs/beginner docs/intermediate docs/advanced docs/recipes docs/prototype -name '*.html'); do
file_basename=$(basename $filename .html)
- if [[ ! " ${FILES_TO_RUN[@]} " =~ " ${file_basename} " ]]; then
+ if [[ ! " ${FILES_TO_RUN} " =~ " ${file_basename} " ]]; then
+ echo "removing $filename"
rm $filename
fi
done
- for filename in $(find docs/beginner docs/intermediate docs/advanced -name '*.rst'); do
+ for filename in $(find docs/beginner docs/intermediate docs/advanced docs/recipes docs/prototype -name '*.rst'); do
file_basename=$(basename $filename .rst)
- if [[ ! " ${FILES_TO_RUN[@]} " =~ " ${file_basename} " ]]; then
+ if [[ ! " ${FILES_TO_RUN} " =~ " ${file_basename} " ]]; then
+ echo "removing $filename"
rm $filename
fi
done
for filename in $(find docs/_downloads -name '*.py'); do
file_basename=$(basename $filename .py)
- if [[ ! " ${FILES_TO_RUN[@]} " =~ " ${file_basename} " ]]; then
+ if [[ ! " ${FILES_TO_RUN} " =~ " ${file_basename} " ]]; then
+ echo "removing $filename"
rm $filename
fi
done
for filename in $(find docs/_downloads -name '*.ipynb'); do
file_basename=$(basename $filename .ipynb)
- if [[ ! " ${FILES_TO_RUN[@]} " =~ " ${file_basename} " ]]; then
+ if [[ ! " ${FILES_TO_RUN} " =~ " ${file_basename} " ]]; then
+ echo "removing $filename"
rm $filename
fi
done
- for filename in $(find docs/_sources/beginner docs/_sources/intermediate docs/_sources/advanced -name '*.rst.txt'); do
+ for filename in $(find docs/_sources/beginner docs/_sources/intermediate docs/_sources/advanced docs/_sources/recipes -name '*.rst.txt'); do
file_basename=$(basename $filename .rst.txt)
- if [[ ! " ${FILES_TO_RUN[@]} " =~ " ${file_basename} " ]]; then
+ if [[ ! " ${FILES_TO_RUN} " =~ " ${file_basename} " ]]; then
+ echo "removing $filename"
rm $filename
fi
done
- for filename in $(find docs/.doctrees/beginner docs/.doctrees/intermediate docs/.doctrees/advanced -name '*.doctree'); do
+ for filename in $(find docs/.doctrees/beginner docs/.doctrees/intermediate docs/.doctrees/advanced docs/.doctrees/recipes docs/.doctrees/prototype -name '*.doctree'); do
file_basename=$(basename $filename .doctree)
- if [[ ! " ${FILES_TO_RUN[@]} " =~ " ${file_basename} " ]]; then
+ if [[ ! " ${FILES_TO_RUN} " =~ " ${file_basename} " ]]; then
+ echo "removing $filename"
rm $filename
fi
done
+ set -x
# Step 5: Remove INVISIBLE_CODE_BLOCK from .html/.rst.txt/.ipynb/.py files
bash $DIR/remove_invisible_code_block_batch.sh docs
+ python .jenkins/validate_tutorials_built.py
# Step 6: Copy generated files to S3, tag with commit ID
7z a worker_${WORKER_ID}.7z docs
- aws s3 cp worker_${WORKER_ID}.7z s3://${BUCKET_NAME}/${COMMIT_ID}/worker_${WORKER_ID}.7z --acl public-read
-elif [[ "${JOB_BASE_NAME}" == *manager ]]; then
+ awsv2 s3 cp worker_${WORKER_ID}.7z s3://${BUCKET_NAME}/${COMMIT_ID}/worker_${WORKER_ID}.7z
+elif [[ "${JOB_TYPE}" == "manager" ]]; then
# Step 1: Generate no-plot HTML pages for all tutorials
+ pip3 install -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
make html-noplot
cp -r _build/html docs
# Step 2: Wait for all workers to finish
- set +e
- for ((worker_id=0;worker_id{link_text}'
+ elif item['t'] == 'Code':
+ # Need to remove icticon as they don't render in .ipynb
+ if any(value == 'octicon' for key, value in item['c'][0][2]):
+ return ''
+ else:
+ # Escape the code and wrap it in tags
+ return f'{html.escape(item["c"][1])}
'
+ elif item['t'] == 'CodeBlock':
+ # Escape the code block and wrap it in tags
+ return f'{html.escape(item["c"][1])}
'
+ else:
+ return ''
+
+
+def process_admonitions(key, value, format, meta):
+ # Replace admonitions with proper HTML.
+ if key == 'Div':
+ [[ident, classes, keyvals], contents] = value
+ if 'note' in classes:
+ color = '#54c7ec'
+ label = 'NOTE:'
+ elif 'tip' in classes:
+ color = '#6bcebb'
+ label = 'TIP:'
+ elif 'warning' in classes:
+ color = '#e94f3b'
+ label = 'WARNING:'
+ else:
+ return
+
+ note_content = []
+ for block in contents:
+ if block.get('t') == 'Para':
+ for item in block['c']:
+ if item['t'] == 'Str':
+ note_content.append(Str(item['c']))
+ elif item['t'] == 'Space':
+ note_content.append(Space())
+ elif item['t'] == 'Link':
+ note_content.append(Link(*item['c']))
+ elif item['t'] == 'Code':
+ note_content.append(Code(*item['c']))
+ elif block.get('t') == 'CodeBlock':
+ note_content.append(CodeBlock(*block['c']))
+
+ note_content_md = ''.join(to_markdown(item) for item in note_content)
+ html_content = markdown.markdown(note_content_md)
+
+ return [{'t': 'RawBlock', 'c': ['html', f'{label}
']}, {'t': 'RawBlock', 'c': ['html', '']}, {'t': 'RawBlock', 'c': ['html', html_content]}, {'t': 'RawBlock', 'c': ['html', '
']}]
+ elif key == 'RawBlock':
+ # this is needed for the cells that have embedded video.
+ # We add a special tag to those: ``` {python, .jupyter-code-cell}
+ # The post-processing script then finds those and generates separate
+ # code cells that can load video.
+ [format, content] = value
+ if format == 'html' and 'iframe' in content:
+ # Extract the video URL
+ video_url = content.split('src="')[1].split('"')[0]
+ # Create the Python code to display the video
+ python_code = f"""
+from IPython.display import display, HTML
+html_code = \"""
+{content}
+\"""
+display(HTML(html_code))
+"""
+
+ return {'t': 'CodeBlock', 'c': [['', ['python', 'jupyter-code-cell'], []], python_code]}
+
+
+def process_images(key, value, format, meta):
+ # Add https://pytorch.org/tutorials/ to images so that they
+ # load correctly in the notebook.
+ if key != 'Image':
+ return None
+ [ident, classes, keyvals], caption, [src, title] = value
+ if not src.startswith('http'):
+ while src.startswith('../'):
+ src = src[3:]
+ if src.startswith('/_static'):
+ src = src[1:]
+ src = 'https://pytorch.org/tutorials/' + src
+
+ return {'t': 'Image', 'c': [[ident, classes, keyvals], caption, [src, title]]}
+
+
+def process_grids(key, value, format, meta):
+ # Generate side by side grid cards. Only for the two-cards layout
+ # that we use in the tutorial template.
+ if key == 'Div':
+ [[ident, classes, keyvals], contents] = value
+ if 'grid' in classes:
+ columns = ['',
+ '
']
+ column_num = 0
+ for block in contents:
+ if 't' in block and block['t'] == 'Div' and 'grid-item-card' in block['c'][0][1]:
+ item_html = ''
+ for item in block['c'][1]:
+ if item['t'] == 'Para':
+ item_html += '
' + ''.join(to_markdown(i) for i in item['c']) + ' '
+ elif item['t'] == 'BulletList':
+ item_html += '
'
+ for list_item in item['c']:
+ item_html += '' + ''.join(to_markdown(i) for i in list_item[0]['c']) + ' '
+ item_html += ' '
+ columns[column_num] += item_html
+ column_num = (column_num + 1) % 2
+ columns = [column + '
' for column in columns]
+ return {'t': 'RawBlock', 'c': ['html', ''.join(columns)]}
+
+def is_code_block(item):
+ return item['t'] == 'Code' and 'octicon' in item['c'][1]
+
+
+def process_all(key, value, format, meta):
+ for transform in [process_admonitions, process_images, process_grids]:
+ new_value = transform(key, value, format, meta)
+ if new_value is not None:
+ break
+ return new_value
+
+
+if __name__ == "__main__":
+ toJSONFilter(process_all)
diff --git a/.jenkins/download_data.py b/.jenkins/download_data.py
new file mode 100644
index 00000000000..939e63fc7a8
--- /dev/null
+++ b/.jenkins/download_data.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+import hashlib
+import os
+
+from typing import Optional
+from urllib.request import urlopen, Request
+from pathlib import Path
+from zipfile import ZipFile
+
+REPO_BASE_DIR = Path(__file__).absolute().parent.parent
+DATA_DIR = REPO_BASE_DIR / "_data"
+BEGINNER_DATA_DIR = REPO_BASE_DIR / "beginner_source" / "data"
+INTERMEDIATE_DATA_DIR = REPO_BASE_DIR / "intermediate_source" / "data"
+ADVANCED_DATA_DIR = REPO_BASE_DIR / "advanced_source" / "data"
+PROTOTYPE_DATA_DIR = REPO_BASE_DIR / "unstable_source" / "data"
+FILES_TO_RUN = os.getenv("FILES_TO_RUN")
+
+
+def size_fmt(nbytes: int) -> str:
+ """Returns a formatted file size string"""
+ KB = 1024
+ MB = 1024 * KB
+ GB = 1024 * MB
+ if abs(nbytes) >= GB:
+ return f"{nbytes * 1.0 / GB:.2f} Gb"
+ elif abs(nbytes) >= MB:
+ return f"{nbytes * 1.0 / MB:.2f} Mb"
+ elif abs(nbytes) >= KB:
+ return f"{nbytes * 1.0 / KB:.2f} Kb"
+ return str(nbytes) + " bytes"
+
+
+def download_url_to_file(url: str,
+ dst: Optional[str] = None,
+ prefix: Optional[Path] = None,
+ sha256: Optional[str] = None) -> Path:
+ dst = dst if dst is not None else Path(url).name
+ dst = dst if prefix is None else str(prefix / dst)
+ if Path(dst).exists():
+ print(f"Skip downloading {url} as {dst} already exists")
+ return Path(dst)
+ file_size = None
+ u = urlopen(Request(url, headers={"User-Agent": "tutorials.downloader"}))
+ meta = u.info()
+ if hasattr(meta, 'getheaders'):
+ content_length = meta.getheaders("Content-Length")
+ else:
+ content_length = meta.get_all("Content-Length")
+ if content_length is not None and len(content_length) > 0:
+ file_size = int(content_length[0])
+ sha256_sum = hashlib.sha256()
+ with open(dst, "wb") as f:
+ while True:
+ buffer = u.read(32768)
+ if len(buffer) == 0:
+ break
+ sha256_sum.update(buffer)
+ f.write(buffer)
+ digest = sha256_sum.hexdigest()
+ if sha256 is not None and sha256 != digest:
+ Path(dst).unlink()
+ raise RuntimeError(f"Downloaded {url} has unexpected sha256sum {digest} should be {sha256}")
+ print(f"Downloaded {url} sha256sum={digest} size={size_fmt(file_size)}")
+ return Path(dst)
+
+
+def unzip(archive: Path, tgt_dir: Path) -> None:
+ with ZipFile(str(archive), "r") as zip_ref:
+ zip_ref.extractall(str(tgt_dir))
+
+
+def download_hymenoptera_data():
+ # transfer learning tutorial data
+ z = download_url_to_file("https://download.pytorch.org/tutorial/hymenoptera_data.zip",
+ prefix=DATA_DIR,
+ sha256="fbc41b31d544714d18dd1230b1e2b455e1557766e13e67f9f5a7a23af7c02209",
+ )
+ unzip(z, BEGINNER_DATA_DIR)
+
+
+def download_nlp_data() -> None:
+ # nlp tutorial data
+ z = download_url_to_file("https://download.pytorch.org/tutorial/data.zip",
+ prefix=DATA_DIR,
+ sha256="fb317e80248faeb62dc25ef3390ae24ca34b94e276bbc5141fd8862c2200bff5",
+ )
+ # This will unzip all files in data.zip to intermediate_source/data/ folder
+ unzip(z, INTERMEDIATE_DATA_DIR.parent)
+
+
+def download_dcgan_data() -> None:
+ # Download dataset for beginner_source/dcgan_faces_tutorial.py
+ z = download_url_to_file("https://s3.amazonaws.com/pytorch-tutorial-assets/img_align_celeba.zip",
+ prefix=DATA_DIR,
+ sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
+ )
+ unzip(z, BEGINNER_DATA_DIR / "celeba")
+
+
+def download_lenet_mnist() -> None:
+ # Download model for beginner_source/fgsm_tutorial.py
+ download_url_to_file("https://docs.google.com/uc?export=download&id=1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl",
+ prefix=BEGINNER_DATA_DIR,
+ dst="lenet_mnist_model.pth",
+ sha256="cb5f8e578aef96d5c1a2cc5695e1aa9bbf4d0fe00d25760eeebaaac6ebc2edcb",
+ )
+
+def download_gpu_quantization_torchao() -> None:
+ # Download SAM model checkpoint unstable_source/gpu_quantization_torchao_tutorial.py
+ download_url_to_file("https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
+ prefix=PROTOTYPE_DATA_DIR,
+ dst="sam_vit_h_4b8939.pth",
+ sha256="a7bf3b02f3ebf1267aba913ff637d9a2d5c33d3173bb679e46d9f338c26f262e",
+ )
+
+def main() -> None:
+ DATA_DIR.mkdir(exist_ok=True)
+ BEGINNER_DATA_DIR.mkdir(exist_ok=True)
+ ADVANCED_DATA_DIR.mkdir(exist_ok=True)
+ INTERMEDIATE_DATA_DIR.mkdir(exist_ok=True)
+ PROTOTYPE_DATA_DIR.mkdir(exist_ok=True)
+
+ if FILES_TO_RUN is None or "transfer_learning_tutorial" in FILES_TO_RUN:
+ download_hymenoptera_data()
+ nlp_tutorials = ["seq2seq_translation_tutorial", "char_rnn_classification_tutorial", "char_rnn_generation_tutorial"]
+ if FILES_TO_RUN is None or any(x in FILES_TO_RUN for x in nlp_tutorials):
+ download_nlp_data()
+ if FILES_TO_RUN is None or "dcgan_faces_tutorial" in FILES_TO_RUN:
+ download_dcgan_data()
+ if FILES_TO_RUN is None or "fgsm_tutorial" in FILES_TO_RUN:
+ download_lenet_mnist()
+ if FILES_TO_RUN is None or "gpu_quantization_torchao_tutorial" in FILES_TO_RUN:
+ download_gpu_quantization_torchao()
+
+if __name__ == "__main__":
+ main()
diff --git a/.jenkins/get_docker_tag.py b/.jenkins/get_docker_tag.py
new file mode 100644
index 00000000000..21c4a8f7089
--- /dev/null
+++ b/.jenkins/get_docker_tag.py
@@ -0,0 +1,18 @@
+import requests
+
+REQUEST_HEADERS = {
+ "Accept": "application/vnd.github.v3+json",
+}
+
+if __name__ == "__main__":
+ url = "https://api.github.com/repos/pytorch/pytorch/contents/.ci"
+
+ response = requests.get(url, headers=REQUEST_HEADERS)
+ docker_sha = None
+ for finfo in response.json():
+ if finfo["name"] == "docker":
+ docker_sha = finfo["sha"]
+ break
+ if docker_sha is None:
+ raise RuntimeError("Can't find sha sum of docker folder")
+ print(docker_sha)
diff --git a/.jenkins/get_files_to_run.py b/.jenkins/get_files_to_run.py
new file mode 100644
index 00000000000..bdf4562a827
--- /dev/null
+++ b/.jenkins/get_files_to_run.py
@@ -0,0 +1,106 @@
+from typing import Any, Dict, List, Optional, Tuple
+import json
+import os
+from pathlib import Path
+from remove_runnable_code import remove_runnable_code
+
+
+# Calculate repo base dir
+REPO_BASE_DIR = Path(__file__).absolute().parent.parent
+
+
+def get_all_files() -> List[str]:
+ sources = [x.relative_to(REPO_BASE_DIR) for x in REPO_BASE_DIR.glob("*_source/**/*.py") if 'data' not in x.parts]
+ return sorted([str(x) for x in sources])
+
+
+def read_metadata() -> Dict[str, Any]:
+ with (REPO_BASE_DIR / ".jenkins" / "metadata.json").open() as fp:
+ return json.load(fp)
+
+
+def calculate_shards(all_files: List[str], num_shards: int = 20) -> List[List[str]]:
+ sharded_files: List[Tuple[float, List[str]]] = [(0.0, []) for _ in range(num_shards)]
+ metadata = read_metadata()
+
+ def get_duration(file: str) -> int:
+ # tutorials not listed in the metadata.json file usually take
+ # <3min to run, so we'll default to 1min if it's not listed
+ return metadata.get(file, {}).get("duration", 60)
+
+ def get_needs_machine(file: str) -> Optional[str]:
+ return metadata.get(file, {}).get("needs", None)
+
+ def add_to_shard(i, filename):
+ shard_time, shard_jobs = sharded_files[i]
+ shard_jobs.append(filename)
+ sharded_files[i] = (
+ shard_time + get_duration(filename),
+ shard_jobs,
+ )
+
+ all_other_files = all_files.copy()
+ needs_multigpu = list(
+ filter(lambda x: get_needs_machine(x) == "linux.16xlarge.nvidia.gpu", all_files,)
+ )
+ needs_a10g = list(
+ filter(lambda x: get_needs_machine(x) == "linux.g5.4xlarge.nvidia.gpu", all_files,)
+ )
+ for filename in needs_multigpu:
+ # currently, the only job that has multigpu is the 0th worker,
+ # so we'll add all the jobs that need this machine to the 0th worker
+ add_to_shard(0, filename)
+ all_other_files.remove(filename)
+ for filename in needs_a10g:
+ # currently, workers 1-5 use linux.g5.4xlarge.nvidia.gpu (sm86, A10G),
+ # so we'll add all the jobs that need this machine to the 1st worker
+ add_to_shard(1, filename)
+ all_other_files.remove(filename)
+ sorted_files = sorted(all_other_files, key=get_duration, reverse=True,)
+
+ for filename in sorted_files:
+ min_shard_index = sorted(range(1, num_shards), key=lambda i: sharded_files[i][0])[
+ 0
+ ]
+ add_to_shard(min_shard_index, filename)
+ return [x[1] for x in sharded_files]
+
+
+def compute_files_to_keep(files_to_run: List[str]) -> List[str]:
+ metadata = read_metadata()
+ files_to_keep = list(files_to_run)
+ for file in files_to_run:
+ extra_files = metadata.get(file, {}).get("extra_files", [])
+ files_to_keep.extend(extra_files)
+ return files_to_keep
+
+
+def remove_other_files(all_files, files_to_keep) -> None:
+
+ for file in all_files:
+ if file not in files_to_keep:
+ remove_runnable_code(file, file)
+
+
+def parse_args() -> Any:
+ from argparse import ArgumentParser
+ parser = ArgumentParser("Select files to run")
+ parser.add_argument("--dry-run", action="store_true")
+ parser.add_argument("--num-shards", type=int, default=int(os.environ.get("NUM_WORKERS", "20")))
+ parser.add_argument("--shard-num", type=int, default=int(os.environ.get("WORKER_ID", "1")))
+ return parser.parse_args()
+
+
+def main() -> None:
+ args = parse_args()
+
+ all_files = get_all_files()
+ files_to_run = calculate_shards(all_files, num_shards=args.num_shards)[args.shard_num - 1]
+ if not args.dry_run:
+ remove_other_files(all_files, compute_files_to_keep(files_to_run))
+ stripped_file_names = [Path(x).stem for x in files_to_run]
+ print(" ".join(stripped_file_names))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.jenkins/get_sphinx_filenames.py b/.jenkins/get_sphinx_filenames.py
new file mode 100644
index 00000000000..b84267b48a3
--- /dev/null
+++ b/.jenkins/get_sphinx_filenames.py
@@ -0,0 +1,13 @@
+from pathlib import Path
+from typing import List
+
+from get_files_to_run import get_all_files
+from validate_tutorials_built import NOT_RUN
+
+
+def get_files_for_sphinx() -> List[str]:
+ all_py_files = get_all_files()
+ return [x for x in all_py_files if all(y not in x for y in NOT_RUN)]
+
+
+SPHINX_SHOULD_RUN = "|".join(get_files_for_sphinx())
diff --git a/.jenkins/insert_last_verified.py b/.jenkins/insert_last_verified.py
new file mode 100644
index 00000000000..b43ef8de8e8
--- /dev/null
+++ b/.jenkins/insert_last_verified.py
@@ -0,0 +1,160 @@
+import json
+import os
+import subprocess
+import sys
+from datetime import datetime
+
+from bs4 import BeautifulSoup
+
+
+json_file_path = "tutorials-review-data.json"
+
+# paths to skip from the post-processing script
+paths_to_skip = [
+ "beginner/examples_autograd/two_layer_net_custom_function", # not present in the repo
+ "beginner/examples_nn/two_layer_net_module", # not present in the repo
+ "beginner/examples_tensor/two_layer_net_numpy", # not present in the repo
+ "beginner/examples_tensor/two_layer_net_tensor", # not present in the repo
+ "beginner/examples_autograd/two_layer_net_autograd", # not present in the repo
+ "beginner/examples_nn/two_layer_net_optim", # not present in the repo
+ "beginner/examples_nn/two_layer_net_nn", # not present in the repo
+ "intermediate/coding_ddpg", # not present in the repo - will delete the carryover
+]
+# Mapping of source directories to build directories
+source_to_build_mapping = {
+ "beginner": "beginner_source",
+ "recipes": "recipes_source",
+ "distributed": "distributed",
+ "intermediate": "intermediate_source",
+ "prototype": "prototype_source",
+ "advanced": "advanced_source",
+ "": "", # root dir for index.rst
+}
+
+def get_git_log_date(file_path, git_log_args):
+ try:
+ result = subprocess.run(
+ ["git", "log"] + git_log_args + ["--", file_path],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ if result.stdout:
+ date_str = result.stdout.splitlines()[0]
+ return datetime.strptime(date_str, "%a, %d %b %Y %H:%M:%S %z")
+ except subprocess.CalledProcessError:
+ pass
+ raise ValueError(f"Could not find date for {file_path}")
+
+def get_creation_date(file_path):
+ return get_git_log_date(file_path, ["--diff-filter=A", "--format=%aD"]).strftime("%b %d, %Y")
+
+
+def get_last_updated_date(file_path):
+ return get_git_log_date(file_path, ["-1", "--format=%aD"]).strftime("%b %d, %Y")
+
+# Try to find the source file with the given base path and the extensions .rst and .py
+def find_source_file(base_path):
+ for ext in [".rst", ".py"]:
+ source_file_path = base_path + ext
+ if os.path.exists(source_file_path):
+ return source_file_path
+ return None
+
+
+# Function to process a JSON file and insert the "Last Verified" information into the HTML files
+def process_json_file(build_dir , json_file_path):
+ with open(json_file_path, "r", encoding="utf-8") as json_file:
+ json_data = json.load(json_file)
+
+ for entry in json_data:
+ path = entry["Path"]
+ last_verified = entry["Last Verified"]
+ status = entry.get("Status", "")
+ if path in paths_to_skip:
+ print(f"Skipping path: {path}")
+ continue
+ if status in ["needs update", "not verified"]:
+ formatted_last_verified = "Not Verified"
+ elif last_verified:
+ try:
+ last_verified_date = datetime.strptime(last_verified, "%Y-%m-%d")
+ formatted_last_verified = last_verified_date.strftime("%b %d, %Y")
+ except ValueError:
+ formatted_last_verified = "Unknown"
+ else:
+ formatted_last_verified = "Not Verified"
+ if status == "deprecated":
+ formatted_last_verified += "Deprecated"
+
+ for build_subdir, source_subdir in source_to_build_mapping.items():
+ if path.startswith(build_subdir):
+ html_file_path = os.path.join(build_dir, path + ".html")
+ base_source_path = os.path.join(
+ source_subdir, path[len(build_subdir) + 1 :]
+ )
+ source_file_path = find_source_file(base_source_path)
+ break
+ else:
+ print(f"Warning: No mapping found for path {path}")
+ continue
+
+ if not os.path.exists(html_file_path):
+ print(
+ f"Warning: HTML file not found for path {html_file_path}."
+ "If this is a new tutorial, please add it to the audit JSON file and set the Verified status and today's date."
+ )
+ continue
+
+ if not source_file_path:
+ print(f"Warning: Source file not found for path {base_source_path}.")
+ continue
+
+ created_on = get_creation_date(source_file_path)
+ last_updated = get_last_updated_date(source_file_path)
+
+ with open(html_file_path, "r", encoding="utf-8") as file:
+ soup = BeautifulSoup(file, "html.parser")
+ # Check if the
tag with class "date-info-last-verified" already exists
+ existing_date_info = soup.find("p", {"class": "date-info-last-verified"})
+ if existing_date_info:
+ print(
+ f"Warning:
tag with class 'date-info-last-verified' already exists in {html_file_path}"
+ )
+ continue
+
+ h1_tag = soup.find("h1") # Find the h1 tag to insert the dates
+ if h1_tag:
+ date_info_tag = soup.new_tag("p", **{"class": "date-info-last-verified"})
+ date_info_tag["style"] = "color: #6c6c6d; font-size: small;"
+ # Add the "Created On", "Last Updated", and "Last Verified" information
+ date_info_tag.string = (
+ f"Created On: {created_on} | "
+ f"Last Updated: {last_updated} | "
+ f"Last Verified: {formatted_last_verified}"
+ )
+ # Insert the new tag after the
tag
+ h1_tag.insert_after(date_info_tag)
+ # Save back to the HTML.
+ with open(html_file_path, "w", encoding="utf-8") as file:
+ file.write(str(soup))
+ else:
+ print(f"Warning: tag not found in {html_file_path}")
+
+
+def main():
+ if len(sys.argv) < 2:
+ print("Error: Build directory not provided. Exiting.")
+ exit(1)
+ build_dir = sys.argv[1]
+ print(f"Build directory: {build_dir}")
+ process_json_file(build_dir , json_file_path)
+ print(
+ "Finished processing JSON file. Please check the output for any warnings. "
+ "Pages like `nlp/index.html` are generated only during the full `make docs` "
+ "or `make html` build. Warnings about these files when you run `make html-noplot` "
+ "can be ignored."
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/.jenkins/metadata.json b/.jenkins/metadata.json
new file mode 100644
index 00000000000..6e82d054b4e
--- /dev/null
+++ b/.jenkins/metadata.json
@@ -0,0 +1,76 @@
+{
+ "intermediate_source/ax_multiobjective_nas_tutorial.py": {
+ "extra_files": ["intermediate_source/mnist_train_nas.py"],
+ "duration": 2000
+ },
+ "beginner_source/dcgan_faces_tutorial.py": {
+ "duration": 2000
+ },
+ "intermediate_source/seq2seq_translation_tutorial.py": {
+ "duration": 1200
+ },
+ "beginner_source/hyperparameter_tuning_tutorial.py": {
+ "duration": 0
+ },
+ "advanced_source/dynamic_quantization_tutorial.py": {
+ "duration": 380
+ },
+ "beginner_source/chatbot_tutorial.py": {
+ "duration": 330
+ },
+ "intermediate_source/pipeline_tutorial.py": {
+ "duration": 320,
+ "needs": "linux.16xlarge.nvidia.gpu"
+ },
+ "beginner_source/blitz/data_parallel_tutorial.py": {
+ "needs": "linux.16xlarge.nvidia.gpu"
+ },
+ "intermediate_source/model_parallel_tutorial.py": {
+ "needs": "linux.16xlarge.nvidia.gpu"
+ },
+ "intermediate_source/torchrec_intro_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "recipes_source/torch_export_aoti_python.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "advanced_source/pendulum.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu",
+ "_comment": "need to be here for the compiling_optimizer_lr_scheduler.py to run."
+ },
+ "intermediate_source/torchvision_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu",
+ "_comment": "does not require a10g but needs to run before gpu_quantization_torchao_tutorial.py."
+ },
+ "advanced_source/coding_ddpg.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu",
+ "_comment": "does not require a10g but needs to run before gpu_quantization_torchao_tutorial.py."
+ },
+ "recipes_source/compiling_optimizer_lr_scheduler.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "intermediate_source/torch_compile_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "intermediate_source/torch_export_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "intermediate_source/scaled_dot_product_attention_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "intermediate_source/transformer_building_blocks.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "recipes_source/torch_compile_user_defined_triton_kernel_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "recipes_source/regional_compilation.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "advanced_source/semi_structured_sparse.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ },
+ "prototype_source/gpu_quantization_torchao_tutorial.py": {
+ "needs": "linux.g5.4xlarge.nvidia.gpu"
+ }
+}
diff --git a/.jenkins/post_process_notebooks.py b/.jenkins/post_process_notebooks.py
new file mode 100644
index 00000000000..d10eb5a1bcc
--- /dev/null
+++ b/.jenkins/post_process_notebooks.py
@@ -0,0 +1,97 @@
+import nbformat as nbf
+import os
+import re
+
+"""
+This post-processing script needs to run after the .ipynb files are
+generated. The script removes extraneous ```{=html} syntax from the
+admonitions and splits the cells that have video iframe into a
+separate code cell that can be run to load the video directly
+in the notebook. This script is included in build.sh.
+"""
+
+
+# Pattern to search ``` {.python .jupyter-code-cell}
+pattern = re.compile(r'(.*?)``` {\.python \.jupyter-code-cell}\n(.*?from IPython\.display import display, HTML.*?display\(HTML\(html_code\)\))\n```(.*)', re.DOTALL)
+
+
+def process_video_cell(notebook_path):
+ """
+ This function finds the code blocks with the
+ "``` {.python .jupyter-code-cell}" code blocks and slices them
+ into a separate code cell (instead of markdown), which allows
+ loading the video in the notebook. The rest of the content is placed
+ in a new markdown cell.
+ """
+ print(f'Processing file: {notebook_path}')
+ notebook = nbf.read(notebook_path, as_version=4)
+
+ # Iterate over markdown cells
+ for i, cell in enumerate(notebook.cells):
+ if cell.cell_type == 'markdown':
+ match = pattern.search(cell.source)
+ if match:
+ print(f'Match found in cell {i}: {match.group(0)[:100]}...')
+ # Extract the parts before and after the video code block
+ before_html_block = match.group(1)
+ code_block = match.group(2)
+
+ # Add a comment to run the cell to display the video
+ code_block = "# Run this cell to load the video\n" + code_block
+ # Create a new code cell
+ new_code_cell = nbf.v4.new_code_cell(source=code_block)
+
+ # Replace the original markdown cell with the part before the code block
+ cell.source = before_html_block
+
+ # Insert the new code cell after the current one
+ notebook.cells.insert(i+1, new_code_cell)
+ print(f'New code cell created with source: {new_code_cell.source}')
+
+ # If there is content after the HTML code block, create a new markdown cell
+ if len(match.group(3).strip()) > 0:
+ after_html_block = match.group(3)
+ new_markdown_cell = nbf.v4.new_markdown_cell(source=after_html_block)
+ # Create a new markdown cell and add the content after code block there
+ notebook.cells.insert(i+2, new_markdown_cell)
+
+ else:
+ # Remove ```{=html} from the code block
+ cell.source = remove_html_tag(cell.source)
+
+ nbf.write(notebook, notebook_path)
+
+
+def remove_html_tag(content):
+ """
+ Pandoc adds an extraneous ```{=html} ``` to raw HTML blocks which
+ prevents it from rendering correctly. This function removes
+ ```{=html} that we don't need.
+ """
+ content = re.sub(r'```{=html}\n \n```', '">', content)
+ content = re.sub(r'<\/div>\n```', '
\n', content)
+ content = re.sub(r'```{=html}\n \n```', '\n', content)
+ content = re.sub(r'```{=html}', '', content)
+ content = re.sub(r'
\n```', '', content)
+ return content
+
+
+def walk_dir(downloads_dir):
+ """
+ Walk the dir and process all notebook files in
+ the _downloads directory and its subdirectories.
+ """
+ for root, dirs, files in os.walk(downloads_dir):
+ for filename in files:
+ if filename.endswith('.ipynb'):
+ process_video_cell(os.path.join(root, filename))
+
+
+def main():
+ downloads_dir = './docs/_downloads'
+ walk_dir(downloads_dir)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.jenkins/remove_invisible_code_block_from_html.py b/.jenkins/remove_invisible_code_block_from_html.py
index 921894e90e2..827b9802d91 100644
--- a/.jenkins/remove_invisible_code_block_from_html.py
+++ b/.jenkins/remove_invisible_code_block_from_html.py
@@ -8,7 +8,7 @@
html = html_file.read()
html_soup = BeautifulSoup(html, 'html.parser')
-elems = html_soup.find_all("div", {"class": "highlight-python"})
+elems = html_soup.find_all("div", {"class": "highlight-default"})
for elem in elems:
if "%%%%%%INVISIBLE_CODE_BLOCK%%%%%%" in str(elem):
elem.decompose()
diff --git a/.jenkins/remove_invisible_code_block_from_rst_txt.py b/.jenkins/remove_invisible_code_block_from_rst_txt.py
index b7452f81e85..e6eb648e754 100644
--- a/.jenkins/remove_invisible_code_block_from_rst_txt.py
+++ b/.jenkins/remove_invisible_code_block_from_rst_txt.py
@@ -7,7 +7,7 @@
with open(rst_txt_file_path, 'r', encoding='utf-8') as rst_txt_file:
rst_txt = rst_txt_file.read()
-splits = rst_txt.split('.. code-block:: python\n\n\n # %%%%%%INVISIBLE_CODE_BLOCK%%%%%%\n')
+splits = rst_txt.split('.. code-block:: default\n\n\n # %%%%%%INVISIBLE_CODE_BLOCK%%%%%%\n')
if len(splits) == 2:
code_before_invisible_block = splits[0]
code_after_invisible_block = splits[1].split(' # %%%%%%INVISIBLE_CODE_BLOCK%%%%%%\n')[1]
diff --git a/.jenkins/remove_runnable_code.py b/.jenkins/remove_runnable_code.py
index 6a61cb656bc..037017d8d76 100644
--- a/.jenkins/remove_runnable_code.py
+++ b/.jenkins/remove_runnable_code.py
@@ -4,44 +4,55 @@
STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE = "STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE"
STATE_NORMAL = "STATE_NORMAL"
-python_file_path = sys.argv[1]
-output_file_path = sys.argv[2]
-with open(python_file_path, 'r', encoding='utf-8') as file:
- lines = file.readlines()
- ret_lines = []
- state = STATE_NORMAL
- for line in lines:
- if state == STATE_NORMAL:
- if line.startswith('#'):
- ret_lines.append(line)
- state = STATE_NORMAL
- elif line.startswith('"""') or line.startswith('r"""'):
- ret_lines.append(line)
- state = STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE
- elif line.startswith("'''") or line.startswith("r'''"):
- ret_lines.append(line)
- state = STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE
- else:
- ret_lines.append("\n")
- state = STATE_NORMAL
- elif state == STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE:
- if line.startswith('"""'):
- ret_lines.append(line)
- state = STATE_NORMAL
- else:
- ret_lines.append(line)
- state = STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE
- elif state == STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE:
- if line.startswith("'''"):
- ret_lines.append(line)
- state = STATE_NORMAL
- else:
- ret_lines.append(line)
- state = STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE
+def remove_runnable_code(python_file_path, output_file_path):
+ with open(python_file_path, 'r', encoding='utf-8') as file:
+ lines = file.readlines()
+ ret_lines = []
+ state = STATE_NORMAL
+ for line in lines:
+ if state == STATE_NORMAL:
+ if line.startswith('#'):
+ ret_lines.append(line)
+ state = STATE_NORMAL
+ elif ((line.startswith('"""') or line.startswith('r"""')) and
+ line.endswith('"""')):
+ ret_lines.append(line)
+ state = STATE_NORMAL
+ elif line.startswith('"""') or line.startswith('r"""'):
+ ret_lines.append(line)
+ state = STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE
+ elif ((line.startswith("'''") or line.startswith("r'''")) and
+ line.endswith("'''")):
+ ret_lines.append(line)
+ state = STATE_NORMAL
+ elif line.startswith("'''") or line.startswith("r'''"):
+ ret_lines.append(line)
+ state = STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE
+ else:
+ ret_lines.append("\n")
+ state = STATE_NORMAL
+ elif state == STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE:
+ if line.startswith('"""'):
+ ret_lines.append(line)
+ state = STATE_NORMAL
+ else:
+ ret_lines.append(line)
+ state = STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE
+ elif state == STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE:
+ if line.startswith("'''"):
+ ret_lines.append(line)
+ state = STATE_NORMAL
+ else:
+ ret_lines.append(line)
+ state = STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE
-ret_lines.append("\n# %%%%%%RUNNABLE_CODE_REMOVED%%%%%%")
+ ret_lines.append("\n# %%%%%%RUNNABLE_CODE_REMOVED%%%%%%")
-with open(output_file_path, 'w', encoding='utf-8') as file:
- for line in ret_lines:
- file.write(line)
+ with open(output_file_path, 'w', encoding='utf-8') as file:
+ for line in ret_lines:
+ file.write(line)
+
+
+if __name__ == "__main__":
+ remove_runnable_code(sys.argv[1], sys.argv[2])
diff --git a/.jenkins/test_files_to_run.py b/.jenkins/test_files_to_run.py
new file mode 100644
index 00000000000..b4842a7dd75
--- /dev/null
+++ b/.jenkins/test_files_to_run.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+from get_files_to_run import get_all_files, calculate_shards
+from unittest import TestCase, main
+from functools import reduce
+
+class TestSharding(TestCase):
+ def test_no_sharding(self):
+ all_files=get_all_files()
+ sharded_files = calculate_shards(all_files, 1)
+ self.assertSetEqual(set(all_files), set(sharded_files[0]))
+
+ def test_sharding(self, num_shards=20):
+ all_files=get_all_files()
+ sharded_files = map(set, calculate_shards(all_files, num_shards))
+ self.assertSetEqual(set(all_files), reduce(lambda x,y: x.union(y), sharded_files, set()))
+
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py
new file mode 100644
index 00000000000..75dd51dd789
--- /dev/null
+++ b/.jenkins/validate_tutorials_built.py
@@ -0,0 +1,84 @@
+from pathlib import Path
+from typing import List
+
+from bs4 import BeautifulSoup
+
+REPO_ROOT = Path(__file__).parent.parent
+
+# For every tutorial on this list, we should determine if it is ok to not run the tutorial (add a comment after
+# the file name to explain why, like intro.html), or fix the tutorial and remove it from this list.
+
+NOT_RUN = [
+ "beginner_source/basics/intro", # no code
+ "beginner_source/introyt/introyt_index", # no code
+ "beginner_source/onnx/intro_onnx",
+ "beginner_source/profiler",
+ "beginner_source/saving_loading_models",
+ "beginner_source/introyt/captumyt",
+ "beginner_source/examples_nn/polynomial_module",
+ "beginner_source/examples_nn/dynamic_net",
+ "beginner_source/examples_nn/polynomial_optim",
+ "beginner_source/examples_autograd/polynomial_autograd",
+ "beginner_source/examples_autograd/polynomial_custom_function",
+ "intermediate_source/dqn_with_rnn_tutorial", #not working on 2.8 release reenable after 3514
+ "intermediate_source/mnist_train_nas", # used by ax_multiobjective_nas_tutorial.py
+ "intermediate_source/torch_compile_conv_bn_fuser",
+ "intermediate_source/_torch_export_nightly_tutorial", # does not work on release
+ "advanced_source/usb_semisup_learn", # fails with CUDA OOM error, should try on a different worker
+ "unstable_source/gpu_direct_storage", # requires specific filesystem + GPUDirect Storage to be set up
+ "recipes_source/recipes/tensorboard_with_pytorch",
+ "recipes_source/recipes/what_is_state_dict",
+ "recipes_source/recipes/profiler_recipe",
+ "recipes_source/recipes/warmstarting_model_using_parameters_from_a_different_model",
+ "recipes_source/recipes/benchmark",
+ "recipes_source/recipes/tuning_guide",
+ "recipes_source/recipes/zeroing_out_gradients",
+ "recipes_source/recipes/defining_a_neural_network",
+ "recipes_source/recipes/timer_quick_start",
+ "recipes_source/recipes/amp_recipe",
+ "recipes_source/recipes/Captum_Recipe",
+ "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release.
+ "advanced_source/semi_structured_sparse", # reenable after 3303 is fixed.
+ "intermediate_source/torchrec_intro_tutorial", #failing with 2.8 reenable after 3498
+]
+
+def tutorial_source_dirs() -> List[Path]:
+ return [
+ p.relative_to(REPO_ROOT).with_name(p.stem[:-7])
+ for p in REPO_ROOT.glob("*_source")
+ ]
+
+
+def main() -> None:
+ docs_dir = REPO_ROOT / "docs"
+ html_file_paths = []
+ for tutorial_source_dir in tutorial_source_dirs():
+ glob_path = f"{tutorial_source_dir}/**/*.html"
+ html_file_paths += docs_dir.glob(glob_path)
+
+ should_not_run = [f'{x.replace("_source", "")}.html' for x in NOT_RUN]
+ did_not_run = []
+ for html_file_path in html_file_paths:
+ with open(html_file_path, "r", encoding="utf-8") as html_file:
+ html = html_file.read()
+ html_soup = BeautifulSoup(html, "html.parser")
+ elems = html_soup.find_all("p", {"class": "sphx-glr-timing"})
+ for elem in elems:
+ if (
+ "Total running time of the script: ( 0 minutes 0.000 seconds)"
+ in elem.text
+ and not any(html_file_path.match(file) for file in should_not_run)
+ ):
+ did_not_run.append(html_file_path.as_posix())
+
+ if len(did_not_run) != 0:
+ raise RuntimeError(
+ "The following file(s) are not known bad but ran in 0.000 sec, meaning that any "
+ + "python code in this tutorial probably didn't run:\n{}".format(
+ "\n".join(did_not_run)
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.lintrunner.toml b/.lintrunner.toml
new file mode 100644
index 00000000000..d3a1cbd9885
--- /dev/null
+++ b/.lintrunner.toml
@@ -0,0 +1,225 @@
+merge_base_with = "origin/main"
+
+# 4805a6ead6f1e7f32351056e2602be4e908f69b7 is from pytorch/pytorch main branch 2025-07-16
+
+[[linter]]
+code = 'SPACES'
+include_patterns = ['**']
+exclude_patterns = [
+ "_static/**/*", # Contains some files that should usually not be linted
+ # All files below this should be checked and either removed from the
+ # exclusion list by fixing them or have a reason to be excluded.
+ "advanced_source/coding_ddpg.py",
+ "advanced_source/cpp_autograd.rst",
+ "advanced_source/cpp_custom_ops.rst",
+ "advanced_source/generic_join.rst",
+ "advanced_source/neural_style_tutorial.py",
+ "advanced_source/pendulum.py",
+ "advanced_source/privateuseone.rst",
+ "advanced_source/semi_structured_sparse.py",
+ "advanced_source/sharding.rst",
+ "advanced_source/torch_script_custom_classes/custom_class_project/custom_test.py",
+ "advanced_source/transformer__timeseries_cpp_tutorial/transformer_timeseries.cpp",
+ "advanced_source/usb_semisup_learn.py",
+ "beginner_source/blitz/README.txt",
+ "beginner_source/blitz/neural_networks_tutorial.py",
+ "beginner_source/dcgan_faces_tutorial.py",
+ "beginner_source/ddp_series_fault_tolerance.rst",
+ "beginner_source/ddp_series_theory.rst",
+ "beginner_source/examples_nn/polynomial_module.py",
+ "beginner_source/examples_nn/polynomial_nn.py",
+ "beginner_source/hta_intro_tutorial.rst",
+ "beginner_source/hta_trace_diff_tutorial.rst",
+ "beginner_source/hybrid_frontend/README.txt",
+ "beginner_source/hybrid_frontend_tutorial.rst",
+ "beginner_source/hyperparameter_tuning_tutorial.py",
+ "beginner_source/introyt/README.txt",
+ "beginner_source/introyt/autogradyt_tutorial.py",
+ "beginner_source/introyt/captumyt.py",
+ "beginner_source/introyt/introyt1_tutorial.py",
+ "beginner_source/introyt/modelsyt_tutorial.py",
+ "beginner_source/introyt/tensorboardyt_tutorial.py",
+ "beginner_source/introyt/tensors_deeper_tutorial.py",
+ "beginner_source/introyt/trainingyt.py",
+ "beginner_source/knowledge_distillation_tutorial.py",
+ "beginner_source/nlp/sequence_models_tutorial.py",
+ "beginner_source/onnx/export_control_flow_model_to_onnx_tutorial.py",
+ "beginner_source/onnx/onnx_registry_tutorial.py",
+ "beginner_source/pytorch_with_examples.rst",
+ "beginner_source/saving_loading_models.py",
+ "beginner_source/template_tutorial.py",
+ "beginner_source/transfer_learning_tutorial.py",
+ "intermediate_source/TCPStore_libuv_backend.rst",
+ "intermediate_source/ax_multiobjective_nas_tutorial.py",
+ "intermediate_source/compiled_autograd_tutorial.rst",
+ "intermediate_source/ddp_series_multinode.rst",
+ "intermediate_source/dqn_with_rnn_tutorial.py",
+ "intermediate_source/fx_profiling_tutorial.py",
+ "intermediate_source/inductor_debug_cpu.py",
+ "intermediate_source/jacobians_hessians.py",
+ "intermediate_source/optimizer_step_in_backward_tutorial.py",
+ "intermediate_source/per_sample_grads.py",
+ "intermediate_source/pruning_tutorial.py",
+ "intermediate_source/reinforcement_q_learning.py",
+ "intermediate_source/tensorboard_profiler_tutorial.py",
+ "intermediate_source/torch_compile_tutorial.py",
+ "intermediate_source/transformer_building_blocks.py",
+ "unstable_source/README.md",
+ "unstable_source/README.txt",
+ "unstable_source/gpu_direct_storage.py",
+ "unstable_source/inductor_cpp_wrapper_tutorial.rst",
+ "unstable_source/inductor_windows.rst",
+ "unstable_source/maskedtensor_advanced_semantics.py",
+ "unstable_source/max_autotune_on_CPU_tutorial.rst",
+ "unstable_source/vmap_recipe.py",
+ "recipes_source/README.txt",
+ "recipes_source/compiling_optimizer.rst",
+ "recipes_source/compiling_optimizer_lr_scheduler.py",
+ "recipes_source/distributed_optim_torchscript.rst",
+ "recipes_source/foreach_map.py",
+ "recipes_source/profile_with_itt.rst",
+ "recipes_source/recipes/Captum_Recipe.py",
+ "recipes_source/recipes/benchmark.py",
+ "recipes_source/recipes/changing_default_device.py",
+ "recipes_source/recipes/defining_a_neural_network.py",
+ "recipes_source/recipes/tensorboard_with_pytorch.py",
+ "recipes_source/recipes/timer_quick_start.py",
+ "recipes_source/recipes/tuning_guide.py",
+ "recipes_source/recipes/warmstarting_model_using_parameters_from_a_different_model.py",
+ "recipes_source/recipes/what_is_state_dict.py",
+ "recipes_source/torch_compile_caching_tutorial.rst",
+ "recipes_source/torch_compile_torch_function_modes.py",
+ "recipes_source/torch_compile_user_defined_triton_kernel_tutorial.py",
+ "recipes_source/torch_compiler_set_stance_tutorial.py",
+ "recipes_source/torch_export_aoti_python.py",
+ "recipes_source/xeon_run_cpu.rst",
+ "advanced_source/cpp_export.rst",
+ "advanced_source/torch-script-parallelism.rst",
+ "advanced_source/torch_script_custom_classes.rst",
+ "advanced_source/torch_script_custom_ops.rst",
+ "recipes_source/torchscript_inference.rst",
+]
+init_command = [
+ 'python3',
+ 'tools/linter/adapters/run_from_link.py',
+ '--lint-name=grep_linter.py',
+ '--lint-link=https://raw.githubusercontent.com/pytorch/pytorch/4805a6ead6f1e7f32351056e2602be4e908f69b7/tools/linter/adapters/grep_linter.py',
+ '--',
+ '--dry-run={{DRYRUN}}',
+]
+command = [
+ 'python3',
+ 'tools/linter/adapters/run_from_link.py',
+ '--run-lint',
+ '--lint-name=grep_linter.py',
+ '--',
+ '--pattern=[[:blank:]]$',
+ '--linter-name=SPACES',
+ '--error-name=trailing spaces',
+ '--replace-pattern=s/[[:blank:]]+$//',
+ """--error-description=\
+ This line has trailing spaces; please remove them.\
+ """,
+ '--',
+ '@{{PATHSFILE}}'
+]
+
+[[linter]]
+code = 'TABS'
+include_patterns = ['**']
+exclude_patterns = [
+ "_static/**/*", # Contains some files that should usually not be linted
+ ".lintrunner.toml", # Ironically needs to contain the tab character to find in other files
+ "Makefile", # Wants tabs for indentation
+ # All files below this should be checked and either removed from the
+ # exclusion list by fixing them or have a reason to be excluded.
+ "advanced_source/README.txt",
+ "advanced_source/cpp_frontend.rst",
+ "advanced_source/torch_script_custom_ops.rst",
+ "beginner_source/README.txt",
+ "beginner_source/basics/tensorqs_tutorial.py",
+ "beginner_source/blitz/README.txt",
+ "beginner_source/blitz/tensor_tutorial.py",
+ "beginner_source/hybrid_frontend/README.txt",
+ "beginner_source/nlp/README.txt",
+ "beginner_source/nlp/pytorch_tutorial.py",
+ "intermediate_source/README.txt",
+ "intermediate_source/TP_tutorial.rst",
+ "intermediate_source/inductor_debug_cpu.py",
+ "unstable_source/README.txt",
+ "recipes_source/README.txt",
+ "recipes_source/recipes/README.txt",
+ "recipes_source/xeon_run_cpu.rst",
+]
+init_command = [
+ 'python3',
+ 'tools/linter/adapters/run_from_link.py',
+ '--lint-name=grep_linter.py',
+ '--lint-link=https://raw.githubusercontent.com/pytorch/pytorch/4805a6ead6f1e7f32351056e2602be4e908f69b7/tools/linter/adapters/grep_linter.py',
+ '--',
+ '--dry-run={{DRYRUN}}',
+]
+command = [
+ 'python3',
+ 'tools/linter/adapters/run_from_link.py',
+ '--run-lint',
+ '--lint-name=grep_linter.py',
+ '--',
+ # @lint-ignore TXT2
+ '--pattern= ',
+ '--linter-name=TABS',
+ '--error-name=saw some tabs',
+ '--replace-pattern=s/\t/ /',
+ """--error-description=\
+ This line has tabs; please replace them with spaces.\
+ """,
+ '--',
+ '@{{PATHSFILE}}'
+]
+
+[[linter]]
+code = 'NEWLINE'
+include_patterns=['**']
+exclude_patterns=[
+ "_static/**/*", # Contains some files that should usually not be linted
+ # All files below this should be checked and either removed from the
+ # exclusion list by fixing them or have a reason to be excluded.
+ "advanced_source/extend_dispatcher.rst",
+ "advanced_source/neural_style_tutorial.py",
+ "advanced_source/sharding.rst",
+ "advanced_source/torch_script_custom_classes/custom_class_project/custom_test.py",
+ "advanced_source/transformer__timeseries_cpp_tutorial/transformer_timeseries.cpp",
+ "beginner_source/blitz/README.txt",
+ "beginner_source/dcgan_faces_tutorial.py",
+ "beginner_source/hta_trace_diff_tutorial.rst",
+ "beginner_source/hybrid_frontend/README.txt",
+ "beginner_source/nlp/pytorch_tutorial.py",
+ "beginner_source/template_tutorial.py",
+ "beginner_source/transfer_learning_tutorial.py",
+ "intermediate_source/custom_function_conv_bn_tutorial.py",
+ "intermediate_source/custom_function_double_backward_tutorial.rst",
+ "intermediate_source/forced_alignment_with_torchaudio_tutorial.rst",
+ "intermediate_source/nlp_from_scratch_index.rst",
+ "intermediate_source/pipeline_tutorial.rst",
+ "recipes_source/README.txt",
+ "recipes_source/script_optimized.rst",
+ "recipes_source/torch_compile_caching_configuration_tutorial.rst",
+ "recipes_source/torch_compile_caching_tutorial.rst",
+]
+init_command = [
+ 'python3',
+ 'tools/linter/adapters/run_from_link.py',
+ '--lint-name=newlines_linter.py',
+ '--lint-link=https://raw.githubusercontent.com/pytorch/pytorch/4805a6ead6f1e7f32351056e2602be4e908f69b7/tools/linter/adapters/newlines_linter.py',
+ '--',
+ '--dry-run={{DRYRUN}}',
+]
+command = [
+ 'python3',
+ 'tools/linter/adapters/run_from_link.py',
+ '--run-lint',
+ '--lint-name=newlines_linter.py',
+ '--',
+ '@{{PATHSFILE}}',
+]
+is_formatter = true
diff --git a/.lycheeignore b/.lycheeignore
new file mode 100644
index 00000000000..fc1e3f1fa85
--- /dev/null
+++ b/.lycheeignore
@@ -0,0 +1,17 @@
+# Used for links to be ignored during the link check.
+# Add link to file along with comment as to why it should be ignored
+
+#Example link in some of the tutorials that should be ignored
+file:///f:/libtmp/some_file
+
+#Ignore links with "file:///" to catch any other example links
+file:\/\/\/.*
+
+# Ignore colab link in the setting of conf.py
+https://pytorch.org/tutorials/beginner/colab/n
+
+# Ignore local host link from intermediate_source/tensorboard_tutorial.rst
+http://localhost:6006
+
+# Ignore local host link from advanced_source/cpp_frontend.rst
+https://www.uber.com/blog/deep-neuroevolution/
diff --git a/.pyspelling.yml b/.pyspelling.yml
new file mode 100644
index 00000000000..bce797e6559
--- /dev/null
+++ b/.pyspelling.yml
@@ -0,0 +1,163 @@
+spellchecker: aspell
+matrix:
+- name: python
+ sources:
+ - "**/*.py"
+ dictionary:
+ wordlists:
+ - en-wordlist.txt
+ pipeline:
+ - pyspelling.filters.python:
+ group_comments: true
+ - pyspelling.filters.context:
+ context_visible_first: true
+ delimiters:
+ # Exclude figure rST tags
+ - open: '\.\.\s+(figure|literalinclude|math|image|grid)::'
+ close: '\n'
+ # Exclude roles:
+ - open: ':(?:(class|py:mod|mod|func|meth|obj)):`'
+ content: '[^`]*'
+ close: '`'
+ # Exclude reStructuredText hyperlinks
+ - open: '\s'
+ content: '\w*'
+ close: '_'
+ # Exclude raw directive
+ - open: '\.\. (raw)::.*$\n*'
+ close: '\n'
+ # Exclude Python coding directives
+ - open: '-\*- coding:'
+ close: '\n'
+ # Exclude Authors:
+ - open: 'Author(|s):'
+ close: '\n'
+ # Exclude .rst directives:
+ - open: ':math:`.*`'
+ close: ' '
+ # Ignore multiline content in codeblock
+ - open: '(?s)^::\n\n '
+ close: '^\n'
+ # Ignore reStructuredText block directives
+ - open: '\.\. (code-block|math)::.*$\n*'
+ content: '(?P(^(?P[ ]+).*$\n))(?P(^([ \t]+.*|[ \t]*)$\n)*)'
+ close: '(^(?![ \t]+.*$))'
+ # Ignore references like "[1] Author: Title"
+ - open: '\[\d\]'
+ close: '\n'
+ - pyspelling.filters.markdown:
+ - pyspelling.filters.html:
+ ignores:
+ - code
+ - pre
+ - pyspelling.filters.url:
+- name: reST
+ sources:
+ - "**/*.rst"
+ dictionary:
+ wordlists:
+ - en-wordlist.txt
+ pipeline:
+ - pyspelling.filters.text:
+ - pyspelling.filters.context:
+ context_visible_first: true
+ delimiters:
+ # Ignore text between inline back ticks
+ - open: '(div style|iframe).*'
+ close: '\n'
+ - open: '(- )?(?P`+)'
+ close: '(?P=open)'
+ - open: ':figure:.*'
+ close: '\n'
+ # Ignore reStructuredText roles
+ - open: ':(?:(class|file|func|math|ref|octicon|meth|obj)):`'
+ content: '[^`]*'
+ close: '`'
+ - open: ':width:'
+ close: '$'
+ # Exclude raw directive
+ - open: '\.\. (raw|grid-item-card|galleryitem|includenodoc)::.*$\n*'
+ close: '\n'
+ # Ignore reStructuredText literals
+ - open: '::$'
+ close: '(?P(?:((?P[ ]+).*$)|(\n))+)'
+ # Ignore reStructuredText hyperlinks
+ - open: '\s'
+ content: '\w*'
+ close: '_'
+ # Ignore hyperlink in the DDP tutorials
+ - open: '`.*'
+ close: '`__'
+ # Ignore reStructuredText header ---
+ - open: '^'
+ content: '--*'
+ close: '$'
+ # Ignore reStructuredText header '''
+ - open: '^'
+ content: '''''*'
+ close: '$'
+ # Ignore reStructuredText block directives
+ - open: '\.\. (code-block|math|table)::.*$\n*'
+ content: '(?P(^(?P[ ]+).*$\n))(?P(^([ \t]+.*|[ \t]*)$\n)*)'
+ close: '(^(?![ \t]+.*$))'
+ - open: '\.\. (raw)::.*$\n*'
+ close: '^\s*$'
+ # Ignore reStructuredText substitution definitions
+ - open: '^\.\. \|[^|]+\|'
+ close: '$'
+ # Ignore reStructuredText substitutions
+ - open: '\|'
+ content: '[^|]*'
+ close: '\|_?'
+ # Ignore reStructuredText toctree
+ - open: '\.\.\s+toctree::'
+ close: '(?P(?:((?P[ ]+).*$)|(\n))+)'
+ # Ignore directives
+ - open: '\.\.\s+(image|include|only)::'
+ close: '$'
+ - pyspelling.filters.url:
+- name: markdown
+ sources:
+ - '**/*.md'
+ dictionary:
+ wordlists:
+ - en-wordlist.txt
+ pipeline:
+ - pyspelling.filters.markdown:
+ markdown_extensions:
+ - markdown.extensions.extra:
+ - markdown.extensions.admonition:
+ - markdown.extensions.codehilite:
+ - markdown.extensions.meta:
+ - markdown.extensions.tables:
+ - markdown.extensions.toc:
+ - pyspelling.filters.html:
+ comments: false
+ ignores:
+ - code
+ - pre
+ - tt
+ - img
+ - a
+ - table
+ - thead
+ - tbody
+ - th
+ - tr
+ - td
+ - pyspelling.filters.context:
+ context_visible_first: true
+ delimiters:
+ # Ignore code blocks
+ - open: '```[a-z]*\n'
+ close: '```\n'
+ # Ignore inline code
+ - open: '`'
+ close: '`'
+ # Ignore links
+ - open: '\[([^]]*)\]'
+ close: '\([^)]*\)'
+ # Ignore HTML comments
+ - {open: '<!--', close: '-->'}
+ - pyspelling.filters.url:
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..b91e23b17c0
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to make participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies within all project spaces, and it also applies when
+an individual is representing the project or its community in public spaces.
+Examples of representing a project or community include using an official
+project e-mail address, posting via an official social media account, or acting
+as an appointed representative at an online or offline event. Representation of
+a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at <opensource-conduct@fb.com>. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000..9c52182e85a
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,367 @@
+# Contributing to tutorials
+
+We want to make contributing to this project as easy and transparent as
+possible. This file covers information on flagging issues, contributing
+updates to existing tutorials--and also submitting new tutorials.
+
+NOTE: This guide assumes that you have your GitHub account properly
+configured, such as having an SSH key. If this is your first time
+contributing on GitHub, see the [GitHub
+Documentation](https://docs.github.com/en/get-started/quickstart/contributing-to-projects)
+on contributing to projects.
+
+
+# Issues
+
+We use [GitHub Issues](https://github.com/pytorch/tutorials/issues) to
+track public bugs. Please ensure your description is clear and has
+sufficient instructions to be able to reproduce the issue.
+
+
+# Security Bugs
+
+Facebook has a [bounty program](https://www.facebook.com/whitehat/) for
+the safe disclosure of security bugs. For these types of issues, please
+go through the process outlined on that page and do not file a public
+issue.
+
+# Contributor License Agreement ("CLA")
+
+In order to accept a pull request, you need to submit a CLA. You only
+need to do this once and you will be able to work on all of Facebook's
+open source projects, not just PyTorch.
+
+Complete your CLA here: <https://code.facebook.com/cla>
+
+
+# License
+
+By contributing to the tutorials, you agree that your contributions will
+be licensed as described in the `LICENSE` file in the root directory of
+this source tree.
+
+
+# Updates to existing tutorials
+
+We welcome your pull requests (PR) for updates and fixes.
+
+1. If you haven't already, complete the Contributor License Agreement
+ ("CLA").
+1. Fork the repo and create a branch from
+ [`main`](https://github.com/pytorch/tutorials).
+1. Test your code.
+1. Lint your code with a tool such as
+ [Pylint](https://pylint.pycqa.org/en/latest/).
+1. Submit your PR for review.
+
+
+# New Tutorials
+
+There are three types of tutorial content that we host on
+[`pytorch.org/tutorials`](https://github.com/pytorch/tutorials):
+
+* **Interactive tutorials** are authored and submitted as Python files.
+ The build system converts these into Jupyter notebooks and HTML. The
+ code in these tutorials is run every time they are built. To keep
+ these tutorials up and running all their package dependencies need to
+ be resolved--which makes it more challenging to maintain this type of
+ tutorial.
+
+* **Non-interactive tutorials** are authored and submitted as
+ reStructuredText files. The build system only converts them into HTML;
+ the code in them does not run on build. These tutorials are easier to
+ create and maintain but they do not provide an interactive experience.
+
+
+* **Recipes** are tutorials that provide bite-sized, actionable
+ examples of how to use specific features, which differentiates them
+ from full-length tutorials. Recipes can be interactive or
+ non-interactive.
+
+
+# Managing data that is used by your tutorial
+
+Your tutorial might depend on external data, such as pre-trained models,
+training data, or test data. We recommend storing this data in a
+commonly-used storage service, such as Amazon S3, and instructing your
+users to download the data at the beginning of your tutorial.
+
+To download your data add a function to the [download.py](https://github.com/pytorch/tutorials/blob/main/.jenkins/download_data.py)
+script. Follow the same pattern as other download functions.
+Please do not add download logic to `Makefile` as it will incur download overhead for all CI shards.
+
+# Python packages used by your tutorial
+
+If your tutorial has dependencies that are not already defined in
+`requirements.txt`, you should add them to that file. We recommend that
+you use only mature, well-supported packages in your tutorial. Packages
+that are obscure or not well-maintained may break as a result of, for
+example, updates to Python or PyTorch or other packages. If your
+tutorial fails to build in our Continuous Integration (CI) system, we
+might contact you in order to resolve the issue.
+
+
+# Deprecation of tutorials
+
+Under some circumstances, we might deprecate--and subsequently
+archive--a tutorial removing it from the site. For example, if the
+tutorial breaks in our CI and we are not able to resolve the issue and
+are also not able to reach you, we might archive the tutorial. In these
+situations, resolving the breaking issue would normally be sufficient to
+make the tutorial available again.
+
+Another situation in which a tutorial might be deprecated is if it
+consistently receives low ratings--or low usage--by the community. Again,
+if this occurs, we will attempt to contact you.
+
+If we identify, or suspect, that your tutorial--or a package that your
+tutorial uses--has a **security or privacy** issue, we will immediately
+take the tutorial off the site.
+
+
+# Guidance for authoring tutorials and recipes
+
+In this section, we describe the process for creating tutorials and
+recipes for PyTorch.
+
+The first step is to decide which type of tutorial you want to create,
+taking into account how much support you can provide to keep the
+tutorial up-to-date. Ideally, your tutorial should demonstrate PyTorch
+functionality that is not duplicated in other tutorials.
+
+As described earlier, tutorials are resources that provide a holistic
+end-to-end understanding of how to use PyTorch. Recipes are scoped
+examples of how to use specific features; the goal of a recipe is to
+teach readers how to easily leverage features of PyTorch for their
+needs. Tutorials and recipes are always _actionable_. If the material is
+purely informative, consider adding it to the API docs instead.
+
+View our current [full-length tutorials](https://pytorch.org/tutorials/).
+
+To create actionable tutorials, start by identifying _learning
+objectives_, which are the end goals. Working backwards from these
+objectives will help to eliminate extraneous information.
+
+
+## Learning objectives ##
+
+To create the learning objectives, focus on what the user will
+implement. Set expectations by explicitly stating what the recipe will
+cover and what users will implement by the end. Here are some examples:
+
+- Create a custom dataset
+- Integrate a dataset using a library
+- Iterate over samples in the dataset
+- Apply a transform to the dataset
+
+
+## Voice and writing style ##
+
+Write for a global audience with an instructive and directive voice.
+
+- PyTorch has a global audience; use clear, easy to understand
+ language. Avoid idioms or other figures of speech.
+- To keep your instructions concise, use
+ [active voice](https://writing.wisc.edu/handbook/style/ccs_activevoice/) as much as possible.
+- For a short guide on the essentials of writing style,
+ [The Elements of Style](https://www.gutenberg.org/files/37134/37134-h/37134-h.htm)
+ is invaluable.
+- For extensive guidance on technical-writing style, the Google developer documentation
+ [google style](https://developers.google.com/style)
+ is a great resource.
+- Think of the process as similar to creating a (really practical)
+ Medium post.
+
+
+## Structure ##
+
+We recommend that tutorials use the following structure which guides users through the learning experience and provides appropriate context:
+
+1. Introduction
+1. Motivation: Why is this topic important?
+1. Link to relevant research papers or other background material.
+1. Learning objectives: Clearly state what the tutorial covers and what
+ users will implement by the end. For example: Provide a summary of
+ how the Integrated Gradients feature works and how to implement it
+ using Captum. The
+ [TensorBoard](https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html)
+ tutorial provides a good example of how to specify learning
+ objectives.
+1. Setup and requirements. Call out any required setup or data
+ downloads.
+1. Step-by-step instructions. Ideally, the steps in the tutorial should
+ map back to the learning objectives. Consider adding comments in the
+ code that correspond to these steps and that help to clarify what
+ each section of the code is doing.
+1. Link to relevant [PyTorch
+ documentation](https://pytorch.org/docs/stable/index.html). This
+ helps readers have context for the tutorial source code and better
+ understand how and why it implements the technique you’re
+ demonstrating.
+1. Recap/Conclusion: Summarize the steps and concepts covered. Highlight
+ key takeaways.
+1. (Optional) Additional practice exercises for users to test their
+ knowledge. An example is [NLP From Scratch: Generating Names with a
+ Character-Level RNN tutorial](https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html#exercises).
+1. Additional resources for more learning, such as documentation, other
+ tutorials, or relevant research.
+
+
+## Example Tutorials ##
+
+The following tutorials do a good job of demonstrating the ideas
+described in the preceding sections:
+
+- [Chatbot Tutorial](https://pytorch.org/tutorials/beginner/chatbot_tutorial.html)
+- [Tensorboard Tutorial](https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html)
+- [NLP From Scratch: Generating Names with a Character-Level RNN
+Tutorial](https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html)
+
+If you are creating a recipe, [this is a good
+example.](https://github.com/pytorch/tutorials/blob/main/recipes_source/recipes/what_is_state_dict.py)
+
+
+# Submission Process #
+
+Submit your tutorial as either a Python (`.py`) file or a
+reStructuredText (`.rst`) file. For Python files, the filename for your
+tutorial should end in "`_tutorial.py`"; for example,
+"`cool_pytorch_feature_tutorial.py`".
+
+Do not submit a Jupyter notebook. If you develop your tutorial in
+Jupyter, you'll need to convert it to Python. This
+[script](https://gist.github.com/chsasank/7218ca16f8d022e02a9c0deb94a310fe)
+is one option for performing this conversion.
+
+For Python files, our CI system runs your code during each build.
+
+
+## Add Your Tutorial Code ##
+
+1. [Fork and
+ clone](https://docs.github.com/en/get-started/quickstart/contributing-to-projects)
+ the repo:
+ [https://github.com/pytorch/tutorials](https://github.com/pytorch/tutorials)
+
+1. Put the tutorial in one of the
+ [`beginner_source`](https://github.com/pytorch/tutorials/tree/main/beginner_source),
+ [`intermediate_source`](https://github.com/pytorch/tutorials/tree/main/intermediate_source),
+ [`advanced_source`](https://github.com/pytorch/tutorials/tree/main/advanced_source)
+ based on the technical level of the content. For recipes, put the
+ recipe in
+ [`recipes_source`](https://github.com/pytorch/tutorials/tree/main/recipes_source).
+ In addition, for recipes, add the recipe in the recipes
+ [README.txt](https://github.com/pytorch/tutorials/blob/main/recipes_source/recipes/README.txt)
+ file.
+
+
+## Include Your Tutorial in `index.rst` ##
+
+In order for your tutorial to appear on the website, and through tag
+search, you need to include it in `index.rst`, or for recipes, in
+`recipes_index.rst`.
+
+1. Open the relevant file
+ [`index.rst`](https://github.com/pytorch/tutorials/blob/main/index.rst)
+ or
+ [`recipes_index.rst`](https://github.com/pytorch/tutorials/blob/main/recipes_index.rst)
+1. Add a _card_ in reStructuredText format similar to the following:
+
+```
+.. customcarditem::
+ :header: Learn the Basics # Tutorial title
+ :card_description: A step-by-step guide to building a complete ML workflow with PyTorch. # Short description
+ :image: _static/img/thumbnails/cropped/60-min-blitz.png # Image that appears with the card
+ :link: beginner/basics/intro.html
+ :tags: Getting-Started
+```
+
+
+### Link ###
+
+The `link` should be the path to your tutorial in the source tree. For
+example, if the tutorial is in `beginner_source`, the link will be
+`beginner_source/rest/of/the/path.html`
+
+
+### Tags ###
+
+Choose tags from the existing tags in the file. Reach out to a project
+maintainer to create a new tag. The list of tags should not have any
+white space between the words. Multi-word tags, such as “Getting
+Started”, should be hyphenated: Getting-Started. Otherwise, the tutorial
+might fail to build, and the cards will not display properly.
+
+
+### Image ###
+
+Add a thumbnail to the
+[`_static/img/thumbnails/cropped`](https://github.com/pytorch/tutorials/tree/main/_static/img/thumbnails/cropped)
+directory. Images that render the best are square--that is, they have
+equal `x` and `y` dimensions--and also have high resolution. [Here is an
+example](https://github.com/pytorch/tutorials/blob/main/_static/img/thumbnails/cropped/loading-data.PNG).
+
+## `toctree` ##
+
+1. Add your tutorial under the corresponding toctree (also in
+ `index.rst`). For example, if you are adding a tutorial that
+ demonstrates the PyTorch ability to process images or video, add it
+ under `Image and Video`:
+
+```
+.. toctree::
+ :maxdepth: 2
+ :includehidden:
+ :hidden:
+ :caption: Image and Video
+
+ intermediate/torchvision_tutorial
+ beginner/my-new-tutorial
+```
+
+
+## Test Your Tutorial Locally ##
+
+The following command builds an HTML version of the tutorial website.
+
+```
+make html-noplot
+```
+
+This command does not run your tutorial code. To build the tutorial in a
+way that executes the code, use `make docs`. However, unless you have a
+GPU-powered machine and a proper PyTorch CUDA setup, running this `make`
+command locally won't work. The continuous integration (CI) system will
+test your tutorial when you submit your PR.
+
+
+## Submit the PR ##
+
+NOTE: Please do not use [ghstack](https://github.com/ezyang/ghstack). We
+do not support ghstack in the [`pytorch/tutorials`](https://github.com/pytorch/tutorials) repo.
+
+Submit the changes as a PR to the main branch of
+[`pytorch/tutorials`](https://github.com/pytorch/tutorials).
+
+1. Add your changes, commit, and push:
+
+ ```
+ git add -A
+ git commit -m "Add <tutorial name>"
+ git push --set-upstream mybranch
+ ```
+
+1. Submit the PR and tag individuals on the PyTorch project who can review
+ your PR.
+1. Address all feedback comments from your reviewers.
+1. Make sure all CI checks are passing.
+
+Once you submit your PR, you can see a generated Netlify preview of your
+build. You can see an example Netlify preview at the following URL:
+
+>
+
+
+## Do not merge the PR yourself ##
+
+Please **DO NOT MERGE** your own PR; the tutorial won't be published. In order to avoid potential build breaks with the tutorials site, only certain maintainers can authorize publishing.
diff --git a/LICENSE b/LICENSE
index 1ba2484d469..338dffbfe74 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
BSD 3-Clause License
-Copyright (c) 2017, Pytorch contributors
+Copyright (c) 2017-2022, Pytorch contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/Makefile b/Makefile
index 6461fbb4243..7fcf1de6636 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@
export LC_ALL=C
# You can set these variables from the command line.
-SPHINXOPTS =
+SPHINXOPTS ?=
SPHINXBUILD = sphinx-build
SPHINXPROJ = PyTorchTutorials
SOURCEDIR = .
@@ -13,6 +13,9 @@ BUILDDIR = _build
DATADIR = _data
GH_PAGES_SOURCES = $(SOURCEDIR) Makefile
+ZIPOPTS ?= -qo
+TAROPTS ?=
+
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -30,30 +33,19 @@ download:
# NOTE: Please consider using the Step1 and one of Step2 for new dataset,
# [something] should be replaced with the actual value.
- # Step1. DOWNLOAD: wget -N [SOURCE_FILE] -P $(DATADIR)
+ # Step1. DOWNLOAD: wget -nv -N [SOURCE_FILE] -P $(DATADIR)
# Step2-1. UNZIP: unzip -o $(DATADIR)/[SOURCE_FILE] -d [*_source/data/]
# Step2-2. UNTAR: tar -xzf $(DATADIR)/[SOURCE_FILE] -C [*_source/data/]
# Step2-3. AS-IS: cp $(DATADIR)/[SOURCE_FILE] [*_source/data/]
- # make data directories
- mkdir -p $(DATADIR)
- mkdir -p advanced_source/data
- mkdir -p beginner_source/data
- mkdir -p intermediate_source/data
-
- # transfer learning tutorial data
- wget -N https://download.pytorch.org/tutorial/hymenoptera_data.zip -P $(DATADIR)
- unzip -o $(DATADIR)/hymenoptera_data.zip -d beginner_source/data/
-
- # nlp tutorial data
- wget -N https://download.pytorch.org/tutorial/data.zip -P $(DATADIR)
- unzip -o $(DATADIR)/data.zip -d intermediate_source/ # This will unzip all files in data.zip to intermediate_source/data/ folder
+ # Run structured downloads first (will also make directories)
+ python3 .jenkins/download_data.py
# data loader tutorial
- wget -N https://download.pytorch.org/tutorial/faces.zip -P $(DATADIR)
- unzip -o $(DATADIR)/faces.zip -d beginner_source/data/
+ wget -nv -N https://download.pytorch.org/tutorial/faces.zip -P $(DATADIR)
+ unzip $(ZIPOPTS) $(DATADIR)/faces.zip -d beginner_source/data/
- wget -N https://download.pytorch.org/models/tutorials/4000_checkpoint.tar -P $(DATADIR)
+ wget -nv -N https://download.pytorch.org/models/tutorials/4000_checkpoint.tar -P $(DATADIR)
cp $(DATADIR)/4000_checkpoint.tar beginner_source/data/
# neural style images
@@ -61,39 +53,44 @@ download:
mkdir -p advanced_source/data/images/
cp -r _static/img/neural-style/ advanced_source/data/images/
- # Download dataset for beginner_source/dcgan_faces_tutorial.py
- wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/img_align_celeba.zip -P $(DATADIR)
- unzip -q -o $(DATADIR)/img_align_celeba.zip -d beginner_source/data/celeba
-
# Download dataset for beginner_source/hybrid_frontend/introduction_to_hybrid_frontend_tutorial.py
- wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/iris.data -P $(DATADIR)
+ wget -nv -N https://s3.amazonaws.com/pytorch-tutorial-assets/iris.data -P $(DATADIR)
cp $(DATADIR)/iris.data beginner_source/data/
# Download dataset for beginner_source/chatbot_tutorial.py
- wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/cornell_movie_dialogs_corpus.zip -P $(DATADIR)
- unzip -q -o $(DATADIR)/cornell_movie_dialogs_corpus.zip -d beginner_source/data/
-
- # Download dataset for beginner_source/audio_classifier_tutorial.py
- wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/UrbanSound8K.tar.gz -P $(DATADIR)
- tar -xzf $(DATADIR)/UrbanSound8K.tar.gz -C ./beginner_source/data/
+ wget -nv -N https://s3.amazonaws.com/pytorch-tutorial-assets/cornell_movie_dialogs_corpus_v2.zip -P $(DATADIR)
+ unzip $(ZIPOPTS) $(DATADIR)/cornell_movie_dialogs_corpus_v2.zip -d beginner_source/data/
- # Download model for beginner_source/fgsm_tutorial.py
- wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/lenet_mnist_model.pth -P $(DATADIR)
- cp $(DATADIR)/lenet_mnist_model.pth ./beginner_source/data/lenet_mnist_model.pth
+ # Download PennFudanPed dataset for intermediate_source/torchvision_tutorial.py
+ wget https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip -P $(DATADIR)
+ unzip -o $(DATADIR)/PennFudanPed.zip -d intermediate_source/data/
+download-last-reviewed-json:
+ @echo "Downloading tutorials-review-data.json..."
+ curl -o tutorials-review-data.json https://raw.githubusercontent.com/pytorch/tutorials/refs/heads/last-reviewed-data-json/tutorials-review-data.json
+ @echo "Finished downloading tutorials-review-data.json."
docs:
make download
+ make download-last-reviewed-json
make html
+ @python .jenkins/insert_last_verified.py $(BUILDDIR)/html
rm -rf docs
cp -r $(BUILDDIR)/html docs
touch docs/.nojekyll
+ rm -rf tutorials-review-data.json
html-noplot:
$(SPHINXBUILD) -D plot_gallery=0 -b html $(SPHINXOPTS) "$(SOURCEDIR)" "$(BUILDDIR)/html"
- bash .jenkins/remove_invisible_code_block_batch.sh "$(BUILDDIR)/html"
+ # bash .jenkins/remove_invisible_code_block_batch.sh "$(BUILDDIR)/html"
@echo
+ make download-last-reviewed-json
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+ @echo "Running post-processing script to insert 'Last Verified' dates..."
+ @python .jenkins/insert_last_verified.py $(BUILDDIR)/html
+ rm -rf tutorials-review-data.json
clean-cache:
make clean
- rm -rf advanced beginner intermediate
+ rm -rf advanced beginner intermediate recipes
+ # remove additional python files downloaded for torchvision_tutorial.py
+ rm -rf intermediate_source/engine.py intermediate_source/utils.py intermediate_source/transforms.py intermediate_source/coco_eval.py intermediate_source/coco_utils.py
diff --git a/README.md b/README.md
index f3e16646f55..3b858a3882b 100644
--- a/README.md
+++ b/README.md
@@ -5,25 +5,75 @@ All the tutorials are now presented as sphinx style documentation at:
## [https://pytorch.org/tutorials](https://pytorch.org/tutorials)
+# Asking a question
+If you have a question about a tutorial, post in https://dev-discuss.pytorch.org/ rather than creating an issue in this repo. Your question will be answered much faster on the dev-discuss forum.
+
+# Submitting an issue
+
+You can submit the following types of issues:
+
+* Feature request - request a new tutorial to be added. Please explain why this tutorial is needed and how it demonstrates PyTorch value.
+* Bug report - report a failure or outdated information in an existing tutorial. When submitting a bug report, please run: `python3 -m torch.utils.collect_env` to get information about your environment and add the output to the bug report.
# Contributing
-We use sphinx-gallery's [notebook styled examples](https://sphinx-gallery.github.io/tutorials/plot_notebook.html#sphx-glr-tutorials-plot-notebook-py) to create the tutorials. Syntax is very simple. In essence, you write a slightly well formatted python file and it shows up as documentation page.
+We use sphinx-gallery's [notebook styled examples](https://sphinx-gallery.github.io/stable/tutorials/index.html) to create the tutorials. Syntax is very simple. In essence, you write a slightly well formatted Python file and it shows up as an HTML page. In addition, a Jupyter notebook is autogenerated and available to run in Google Colab.
+
+Here is how you can create a new tutorial (for a detailed description, see [CONTRIBUTING.md](./CONTRIBUTING.md)):
+
+NOTE: Before submitting a new tutorial, read [PyTorch Tutorial Submission Policy](./tutorial_submission_policy.md).
-Here's how to create a new tutorial:
-1. Create a notebook styled python file. If you want it executed while inserted into documentation, save the file with suffix `tutorial` so that file name is `your_tutorial.py`.
-2. Put it in one of the beginner_source, intermediate_source, advanced_source based on the level.
-2. Include it in the right TOC tree at index.rst
-3. Create a thumbnail in the index file using a command like `.. galleryitem:: beginner/your_tutorial.py`. (This is a custom directive. See `custom_directives.py` for more info.)
+1. Create a Python file. If you want it executed while inserted into documentation, save the file with the suffix `tutorial` so that the file name is `your_tutorial.py`.
+2. Put it in one of the `beginner_source`, `intermediate_source`, `advanced_source` directory based on the level of difficulty. If it is a recipe, add it to `recipes_source`. For tutorials demonstrating unstable prototype features, add to the `prototype_source`.
+3. For Tutorials (except if it is a prototype feature), include it in the `toctree` directive and create a `customcarditem` in [index.rst](./index.rst).
+4. For Tutorials (except if it is a prototype feature), create a thumbnail in the [index.rst file](https://github.com/pytorch/tutorials/blob/main/index.rst) using a command like `.. customcarditem:: beginner/your_tutorial.html`. For Recipes, create a thumbnail in the [recipes_index.rst](https://github.com/pytorch/tutorials/blob/main/recipes_index.rst)
-In case you prefer to write your tutorial in jupyter, you can use [this script](https://gist.github.com/chsasank/7218ca16f8d022e02a9c0deb94a310fe) to convert the notebook to python file. After conversion and addition to the project, please make sure the sections headings etc are in logical order.
+If you are starting off with a Jupyter notebook, you can use [this script](https://gist.github.com/chsasank/7218ca16f8d022e02a9c0deb94a310fe) to convert the notebook to Python file. After conversion and addition to the project, please make sure that section headings and other things are in logical order.
-## Building
+## Building locally
-- Start with installing torch, torchvision, and your GPUs latest drivers. Install other requirements using `pip install -r requirements.txt`
+The tutorial build is very large and requires a GPU. If your machine does not have a GPU device, you can preview your HTML build without actually downloading the data and running the tutorial code:
-> If you want to use `virtualenv`, make your environment in a `venv` directory like: `virtualenv ./venv`, then `source ./venv/bin/activate`.
+1. Install required dependencies by running: `pip install -r requirements.txt`.
-- Then you can build using `make docs`. This will download the data, execute the tutorials and build the documentation to `docs/` directory. This will take about 60-120 min for systems with GPUs. If you do not have a GPU installed on your system, then see next step.
+> Typically, you would run either in `conda` or `virtualenv`. If you want to use `virtualenv`, in the root of the repo, run: `virtualenv venv`, then `source venv/bin/activate`.
+
+- If you have a GPU-powered laptop, you can build using `make docs`. This will download the data, execute the tutorials and build the documentation to `docs/` directory. This might take about 60-120 min for systems with GPUs. If you do not have a GPU installed on your system, then see next step.
- You can skip the computationally intensive graph generation by running `make html-noplot` to build basic html documentation to `_build/html`. This way, you can quickly preview your tutorial.
+
+## Building a single tutorial
+
+You can build a single tutorial by using the `GALLERY_PATTERN` environment variable. For example to run only `neural_style_transfer_tutorial.py`, run:
+
+```
+GALLERY_PATTERN="neural_style_transfer_tutorial.py" make html
+```
+or
+
+```
+GALLERY_PATTERN="neural_style_transfer_tutorial.py" sphinx-build . _build
+```
+
+The `GALLERY_PATTERN` variable respects regular expressions.
+
+## Spell Check
+You can run pyspelling to check for spelling errors in the tutorials. To check only Python files, run `pyspelling -n python`. To check only `.rst` files, use `pyspelling -n reST`. Currently, `.rst` spell checking is limited to the `beginner/` directory. Contributions to enable spell checking in other directories are welcome!
+
+
+```
+pyspelling # full check (~3 mins)
+pyspelling -n python # Python files only
+pyspelling -n reST # reST files (only beginner/ dir currently included)
+```
+
+
+## About contributing to PyTorch Documentation and Tutorials
+* You can find information about contributing to PyTorch documentation in the
+PyTorch Repo [README.md](https://github.com/pytorch/pytorch/blob/master/README.md) file.
+* Additional information can be found in [PyTorch CONTRIBUTING.md](https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md).
+
+
+## License
+
+PyTorch Tutorials is BSD licensed, as found in the LICENSE file.
diff --git a/_static/css/custom.css b/_static/css/custom.css
new file mode 100755
index 00000000000..a0882c1d4fc
--- /dev/null
+++ b/_static/css/custom.css
@@ -0,0 +1,97 @@
+/* sphinx-design styles for cards/tabs
+*/
+
+:root {
+ --sd-color-info: #ee4c2c;
+ --sd-color-primary: #6c6c6d;
+ --sd-color-primary-highlight: #f3f4f7;
+ --sd-color-card-border-hover: #ee4c2c;
+ --sd-color-card-border: #f3f4f7;
+ --sd-color-card-background: #fff;
+ --sd-color-card-text: inherit;
+ --sd-color-card-header: transparent;
+ --sd-color-card-footer: transparent;
+ --sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);
+ --sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);
+ --sd-color-tabs-label-inactive: hsl(0, 0%, 66%);
+ --sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);
+ --sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);
+ --sd-color-tabs-underline-inactive: transparent;
+ --sd-color-tabs-overline: rgb(222, 222, 222);
+ --sd-color-tabs-underline: rgb(222, 222, 222);
+}
+
+.sd-text-info {
+ color: #ee4c2c;
+}
+
+
+.sd-card {
+ position: relative;
+ background-color: #fff;
+ opacity: 1.0;
+ border-radius: 0px;
+ width: 30%;
+ border: none;
+ padding-bottom: 0px;
+}
+
+
+.sd-card-img {
+ opacity: 0.5;
+ width: 200px;
+ padding: 0px;
+}
+
+.sd-card-img:hover {
+ opacity: 1.0;
+ background-color: #f3f4f7;
+}
+
+
+.sd-card:after {
+ display: block;
+ opacity: 1;
+ content: '';
+ border-bottom: solid 1px #ee4c2c;
+ background-color: #fff;
+ transform: scaleX(0);
+ transition: transform .250s ease-in-out;
+ transform-origin: 0% 50%;
+}
+
+.sd-card:hover {
+ background-color: #fff;
+ opacity: 1;
+ border-top: 1px solid #f3f4f7;
+ border-left: 1px solid #f3f4f7;
+ border-right: 1px solid #f3f4f7;
+}
+
+.sd-card:hover:after {
+ transform: scaleX(1);
+}
+
+.card-prerequisites:hover {
+ transition: none;
+ border: none;
+}
+
+.card-prerequisites:hover:after {
+ transition: none;
+ transform: none;
+}
+
+.card-prerequisites:after {
+ display: block;
+ content: '';
+ border-bottom: none;
+ background-color: #fff;
+ transform: none;
+ transition: none;
+ transform-origin: none;
+}
+
+.pytorch-left-menu-search input[type=text] {
+ background-image: url("../images/search-icon.svg");
+}
diff --git a/_static/css/custom2.css b/_static/css/custom2.css
new file mode 100644
index 00000000000..a24ee796872
--- /dev/null
+++ b/_static/css/custom2.css
@@ -0,0 +1,112 @@
+/* Survey banner .css */
+
+.survey-banner {
+ margin-top: 10px;
+ background-color: #f3f4f7;
+ padding-top: 15px;
+ padding-left: 10px;
+ padding-bottom: 1px;
+}
+
+@media screen and (max-width: 600px) {
+ .survey-banner {
+ padding-top: 5px;
+ padding-left: 5px;
+ padding-bottom: -1px;
+ font-size: 12px;
+ margin-bottom: 5px;
+ }
+}
+
+/* Left nav for 2nd level nav */
+
+.pytorch-left-menu li.toctree-l2 {
+ padding-left: 10px;
+}
+
+.pytorch-left-menu li.toctree-l2.current > a {
+ color: #ee4c2c;
+}
+
+.pytorch-left-menu li.toctree-l2.current a:link.reference.internal {
+ color: #ee4c2c;
+}
+
+.pytorch-left-menu li.toctree-l1.current > a:before {
+ content: "";
+}
+
+/* search radio button*/
+
+input[type="radio"] {
+ accent-color: #ee4c2c;
+}
+
+.gsst_b {
+ display: none;
+}
+
+#gsc-i-id1 {
+ height: 1.5rem;
+ text-indent: 12px !important;
+ font-size: 1rem !important;
+ font-family: "FreightSansi";
+ background-image: url(../images/search-icon.svg) !important;
+ background-repeat: no-repeat !important;
+ background-size: 18px 18px !important;
+ background-position: 5px 0px !important;
+ padding-left: 20px !important;
+}
+
+#gsc-i-id1::placeholder {
+ font-family: 'FreightSans';
+ font-size: 1rem;
+ color: #262626;
+}
+
+.gsc-control-cse {
+ padding: 0 !important;
+ border-radius: 0px !important;
+ border: none !important;
+}
+
+.gsc-overflow-hidden {
+ overflow: visible !important;
+}
+
+#___gcse_0 {
+ height: 44px !important;
+ padding: 0 !important;
+}
+
+table.gsc-search-box td.gsc-input {
+ padding-right: 0 !important;
+}
+
+table.gsc-search-box td {
+ height: 44px;
+ margin-bottom: 0 !important;
+ padding-bottom: 0 !important;
+}
+
+.gsc-search-button-v2 {
+ display: none;
+}
+
+.gs_id50 {
+ width: 308px;
+}
+
+.gsib_a {
+ padding: 0px 8px 4px 9px !important;
+}
+
+.gsc-input-box {
+ border-radius: 0px !important;
+ border: none !important;
+}
+
+form.gsc-search-box {
+ margin-bottom: 0px;
+}
+
diff --git a/_static/doctools.js b/_static/doctools.js
deleted file mode 100755
index 6d984d66d2e..00000000000
--- a/_static/doctools.js
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * doctools.js
- * ~~~~~~~~~~~
- *
- * Sphinx JavaScript utilities for all documentation.
- *
- * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-/**
- * make the code below compatible with browsers without
- * an installed firebug like debugger
-if (!window.console || !console.firebug) {
- var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
- "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
- "profile", "profileEnd"];
- window.console = {};
- for (var i = 0; i < names.length; ++i)
- window.console[names[i]] = function() {};
-}
- */
-
-/**
- * small helper function to urldecode strings
- */
-jQuery.urldecode = function(x) {
- return decodeURIComponent(x).replace(/\+/g, ' ');
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
- if (typeof s === 'undefined')
- s = document.location.search;
- var parts = s.substr(s.indexOf('?') + 1).split('&');
- var result = {};
- for (var i = 0; i < parts.length; i++) {
- var tmp = parts[i].split('=', 2);
- var key = jQuery.urldecode(tmp[0]);
- var value = jQuery.urldecode(tmp[1]);
- if (key in result)
- result[key].push(value);
- else
- result[key] = [value];
- }
- return result;
-};
-
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */
-jQuery.fn.highlightText = function(text, className) {
- function highlight(node, addItems) {
- if (node.nodeType === 3) {
- var val = node.nodeValue;
- var pos = val.toLowerCase().indexOf(text);
- if (pos >= 0 &&
- !jQuery(node.parentNode).hasClass(className) &&
- !jQuery(node.parentNode).hasClass("nohighlight")) {
- var span;
- var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
- if (isInSVG) {
- span = document.createElementNS("https://www.w3.org/2000/svg", "tspan");
- } else {
- span = document.createElement("span");
- span.className = className;
- }
- span.appendChild(document.createTextNode(val.substr(pos, text.length)));
- node.parentNode.insertBefore(span, node.parentNode.insertBefore(
- document.createTextNode(val.substr(pos + text.length)),
- node.nextSibling));
- node.nodeValue = val.substr(0, pos);
- if (isInSVG) {
- var bbox = span.getBBox();
- var rect = document.createElementNS("https://www.w3.org/2000/svg", "rect");
- rect.x.baseVal.value = bbox.x;
- rect.y.baseVal.value = bbox.y;
- rect.width.baseVal.value = bbox.width;
- rect.height.baseVal.value = bbox.height;
- rect.setAttribute('class', className);
- var parentOfText = node.parentNode.parentNode;
- addItems.push({
- "parent": node.parentNode,
- "target": rect});
- }
- }
- }
- else if (!jQuery(node).is("button, select, textarea")) {
- jQuery.each(node.childNodes, function() {
- highlight(this, addItems);
- });
- }
- }
- var addItems = [];
- var result = this.each(function() {
- highlight(this, addItems);
- });
- for (var i = 0; i < addItems.length; ++i) {
- jQuery(addItems[i].parent).before(addItems[i].target);
- }
- return result;
-};
-
-/*
- * backward compatibility for jQuery.browser
- * This will be supported until firefox bug is fixed.
- */
-if (!jQuery.browser) {
- jQuery.uaMatch = function(ua) {
- ua = ua.toLowerCase();
-
- var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
- /(webkit)[ \/]([\w.]+)/.exec(ua) ||
- /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
- /(msie) ([\w.]+)/.exec(ua) ||
- ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
- [];
-
- return {
- browser: match[ 1 ] || "",
- version: match[ 2 ] || "0"
- };
- };
- jQuery.browser = {};
- jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
-}
-
-/**
- * Small JavaScript module for the documentation.
- */
-var Documentation = {
-
- init : function() {
- this.fixFirefoxAnchorBug();
- this.highlightSearchWords();
- this.initIndexTable();
-
- },
-
- /**
- * i18n support
- */
- TRANSLATIONS : {},
- PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
- LOCALE : 'unknown',
-
- // gettext and ngettext don't access this so that the functions
- // can safely bound to a different name (_ = Documentation.gettext)
- gettext : function(string) {
- var translated = Documentation.TRANSLATIONS[string];
- if (typeof translated === 'undefined')
- return string;
- return (typeof translated === 'string') ? translated : translated[0];
- },
-
- ngettext : function(singular, plural, n) {
- var translated = Documentation.TRANSLATIONS[singular];
- if (typeof translated === 'undefined')
- return (n == 1) ? singular : plural;
- return translated[Documentation.PLURALEXPR(n)];
- },
-
- addTranslations : function(catalog) {
- for (var key in catalog.messages)
- this.TRANSLATIONS[key] = catalog.messages[key];
- this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
- this.LOCALE = catalog.locale;
- },
-
- /**
- * add context elements like header anchor links
- */
- addContextElements : function() {
- $('div[id] > :header:first').each(function() {
- $('').
- attr('href', '#' + this.id).
- attr('title', _('Permalink to this headline')).
- appendTo(this);
- });
- $('dt[id]').each(function() {
- $('').
- attr('href', '#' + this.id).
- attr('title', _('Permalink to this definition')).
- appendTo(this);
- });
- },
-
- /**
- * workaround a firefox stupidity
- * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
- */
- fixFirefoxAnchorBug : function() {
- if (document.location.hash && $.browser.mozilla)
- window.setTimeout(function() {
- document.location.href += '';
- }, 10);
- },
-
- /**
- * highlight the search words provided in the url in the text
- */
- highlightSearchWords : function() {
- var params = $.getQueryParameters();
- var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
- if (terms.length) {
- var body = $('div.body');
- if (!body.length) {
- body = $('body');
- }
- window.setTimeout(function() {
- $.each(terms, function() {
- body.highlightText(this.toLowerCase(), 'highlighted');
- });
- }, 10);
- $('' + _('Hide Search Matches') + '
')
- .appendTo($('#searchbox'));
- }
- },
-
- /**
- * init the domain index toggle buttons
- */
- initIndexTable : function() {
- var togglers = $('img.toggler').click(function() {
- var src = $(this).attr('src');
- var idnum = $(this).attr('id').substr(7);
- $('tr.cg-' + idnum).toggle();
- if (src.substr(-9) === 'minus.png')
- $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
- else
- $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
- }).css('display', '');
- if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
- togglers.click();
- }
- },
-
- /**
- * helper function to hide the search marks again
- */
- hideSearchWords : function() {
- $('#searchbox .highlight-link').fadeOut(300);
- $('span.highlighted').removeClass('highlighted');
- },
-
- /**
- * make the url absolute
- */
- makeURL : function(relativeURL) {
- return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
- },
-
- /**
- * get the current relative url
- */
- getCurrentURL : function() {
- var path = document.location.pathname;
- var parts = path.split(/\//);
- $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
- if (this === '..')
- parts.pop();
- });
- var url = parts.join('/');
- return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
- },
-
- initOnKeyListeners: function() {
- $(document).keyup(function(event) {
- var activeElementType = document.activeElement.tagName;
- // don't navigate when in search box or textarea
- if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
- switch (event.keyCode) {
- case 37: // left
- var prevHref = $('link[rel="prev"]').prop('href');
- if (prevHref) {
- window.location.href = prevHref;
- return false;
- }
- case 39: // right
- var nextHref = $('link[rel="next"]').prop('href');
- if (nextHref) {
- window.location.href = nextHref;
- return false;
- }
- }
- }
- });
- }
-};
-
-// quick alias for translations
-_ = Documentation.gettext;
-
-$(document).ready(function() {
- Documentation.init();
-});
\ No newline at end of file
diff --git a/_static/documentation_options.js b/_static/documentation_options.js
deleted file mode 100755
index a9214d61b9c..00000000000
--- a/_static/documentation_options.js
+++ /dev/null
@@ -1,9 +0,0 @@
-var DOCUMENTATION_OPTIONS = {
- URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
- VERSION: '0.5.0a0+a24163a',
- LANGUAGE: 'None',
- COLLAPSE_INDEX: false,
- FILE_SUFFIX: '.html',
- HAS_SOURCE: true,
- SOURCELINK_SUFFIX: '.txt'
-};
\ No newline at end of file
diff --git a/_static/imagenet_class_index.json b/_static/imagenet_class_index.json
new file mode 100644
index 00000000000..5fe0dfefcd3
--- /dev/null
+++ b/_static/imagenet_class_index.json
@@ -0,0 +1 @@
+{"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"], "2": ["n01484850", "great_white_shark"], "3": ["n01491361", "tiger_shark"], "4": ["n01494475", "hammerhead"], "5": ["n01496331", "electric_ray"], "6": ["n01498041", "stingray"], "7": ["n01514668", "cock"], "8": ["n01514859", "hen"], "9": ["n01518878", "ostrich"], "10": ["n01530575", "brambling"], "11": ["n01531178", "goldfinch"], "12": ["n01532829", "house_finch"], "13": ["n01534433", "junco"], "14": ["n01537544", "indigo_bunting"], "15": ["n01558993", "robin"], "16": ["n01560419", "bulbul"], "17": ["n01580077", "jay"], "18": ["n01582220", "magpie"], "19": ["n01592084", "chickadee"], "20": ["n01601694", "water_ouzel"], "21": ["n01608432", "kite"], "22": ["n01614925", "bald_eagle"], "23": ["n01616318", "vulture"], "24": ["n01622779", "great_grey_owl"], "25": ["n01629819", "European_fire_salamander"], "26": ["n01630670", "common_newt"], "27": ["n01631663", "eft"], "28": ["n01632458", "spotted_salamander"], "29": ["n01632777", "axolotl"], "30": ["n01641577", "bullfrog"], "31": ["n01644373", "tree_frog"], "32": ["n01644900", "tailed_frog"], "33": ["n01664065", "loggerhead"], "34": ["n01665541", "leatherback_turtle"], "35": ["n01667114", "mud_turtle"], "36": ["n01667778", "terrapin"], "37": ["n01669191", "box_turtle"], "38": ["n01675722", "banded_gecko"], "39": ["n01677366", "common_iguana"], "40": ["n01682714", "American_chameleon"], "41": ["n01685808", "whiptail"], "42": ["n01687978", "agama"], "43": ["n01688243", "frilled_lizard"], "44": ["n01689811", "alligator_lizard"], "45": ["n01692333", "Gila_monster"], "46": ["n01693334", "green_lizard"], "47": ["n01694178", "African_chameleon"], "48": ["n01695060", "Komodo_dragon"], "49": ["n01697457", "African_crocodile"], "50": ["n01698640", "American_alligator"], "51": ["n01704323", "triceratops"], "52": ["n01728572", "thunder_snake"], "53": ["n01728920", "ringneck_snake"], "54": ["n01729322", "hognose_snake"], "55": ["n01729977", "green_snake"], "56": 
["n01734418", "king_snake"], "57": ["n01735189", "garter_snake"], "58": ["n01737021", "water_snake"], "59": ["n01739381", "vine_snake"], "60": ["n01740131", "night_snake"], "61": ["n01742172", "boa_constrictor"], "62": ["n01744401", "rock_python"], "63": ["n01748264", "Indian_cobra"], "64": ["n01749939", "green_mamba"], "65": ["n01751748", "sea_snake"], "66": ["n01753488", "horned_viper"], "67": ["n01755581", "diamondback"], "68": ["n01756291", "sidewinder"], "69": ["n01768244", "trilobite"], "70": ["n01770081", "harvestman"], "71": ["n01770393", "scorpion"], "72": ["n01773157", "black_and_gold_garden_spider"], "73": ["n01773549", "barn_spider"], "74": ["n01773797", "garden_spider"], "75": ["n01774384", "black_widow"], "76": ["n01774750", "tarantula"], "77": ["n01775062", "wolf_spider"], "78": ["n01776313", "tick"], "79": ["n01784675", "centipede"], "80": ["n01795545", "black_grouse"], "81": ["n01796340", "ptarmigan"], "82": ["n01797886", "ruffed_grouse"], "83": ["n01798484", "prairie_chicken"], "84": ["n01806143", "peacock"], "85": ["n01806567", "quail"], "86": ["n01807496", "partridge"], "87": ["n01817953", "African_grey"], "88": ["n01818515", "macaw"], "89": ["n01819313", "sulphur-crested_cockatoo"], "90": ["n01820546", "lorikeet"], "91": ["n01824575", "coucal"], "92": ["n01828970", "bee_eater"], "93": ["n01829413", "hornbill"], "94": ["n01833805", "hummingbird"], "95": ["n01843065", "jacamar"], "96": ["n01843383", "toucan"], "97": ["n01847000", "drake"], "98": ["n01855032", "red-breasted_merganser"], "99": ["n01855672", "goose"], "100": ["n01860187", "black_swan"], "101": ["n01871265", "tusker"], "102": ["n01872401", "echidna"], "103": ["n01873310", "platypus"], "104": ["n01877812", "wallaby"], "105": ["n01882714", "koala"], "106": ["n01883070", "wombat"], "107": ["n01910747", "jellyfish"], "108": ["n01914609", "sea_anemone"], "109": ["n01917289", "brain_coral"], "110": ["n01924916", "flatworm"], "111": ["n01930112", "nematode"], "112": ["n01943899", "conch"], 
"113": ["n01944390", "snail"], "114": ["n01945685", "slug"], "115": ["n01950731", "sea_slug"], "116": ["n01955084", "chiton"], "117": ["n01968897", "chambered_nautilus"], "118": ["n01978287", "Dungeness_crab"], "119": ["n01978455", "rock_crab"], "120": ["n01980166", "fiddler_crab"], "121": ["n01981276", "king_crab"], "122": ["n01983481", "American_lobster"], "123": ["n01984695", "spiny_lobster"], "124": ["n01985128", "crayfish"], "125": ["n01986214", "hermit_crab"], "126": ["n01990800", "isopod"], "127": ["n02002556", "white_stork"], "128": ["n02002724", "black_stork"], "129": ["n02006656", "spoonbill"], "130": ["n02007558", "flamingo"], "131": ["n02009229", "little_blue_heron"], "132": ["n02009912", "American_egret"], "133": ["n02011460", "bittern"], "134": ["n02012849", "crane"], "135": ["n02013706", "limpkin"], "136": ["n02017213", "European_gallinule"], "137": ["n02018207", "American_coot"], "138": ["n02018795", "bustard"], "139": ["n02025239", "ruddy_turnstone"], "140": ["n02027492", "red-backed_sandpiper"], "141": ["n02028035", "redshank"], "142": ["n02033041", "dowitcher"], "143": ["n02037110", "oystercatcher"], "144": ["n02051845", "pelican"], "145": ["n02056570", "king_penguin"], "146": ["n02058221", "albatross"], "147": ["n02066245", "grey_whale"], "148": ["n02071294", "killer_whale"], "149": ["n02074367", "dugong"], "150": ["n02077923", "sea_lion"], "151": ["n02085620", "Chihuahua"], "152": ["n02085782", "Japanese_spaniel"], "153": ["n02085936", "Maltese_dog"], "154": ["n02086079", "Pekinese"], "155": ["n02086240", "Shih-Tzu"], "156": ["n02086646", "Blenheim_spaniel"], "157": ["n02086910", "papillon"], "158": ["n02087046", "toy_terrier"], "159": ["n02087394", "Rhodesian_ridgeback"], "160": ["n02088094", "Afghan_hound"], "161": ["n02088238", "basset"], "162": ["n02088364", "beagle"], "163": ["n02088466", "bloodhound"], "164": ["n02088632", "bluetick"], "165": ["n02089078", "black-and-tan_coonhound"], "166": ["n02089867", "Walker_hound"], "167": 
["n02089973", "English_foxhound"], "168": ["n02090379", "redbone"], "169": ["n02090622", "borzoi"], "170": ["n02090721", "Irish_wolfhound"], "171": ["n02091032", "Italian_greyhound"], "172": ["n02091134", "whippet"], "173": ["n02091244", "Ibizan_hound"], "174": ["n02091467", "Norwegian_elkhound"], "175": ["n02091635", "otterhound"], "176": ["n02091831", "Saluki"], "177": ["n02092002", "Scottish_deerhound"], "178": ["n02092339", "Weimaraner"], "179": ["n02093256", "Staffordshire_bullterrier"], "180": ["n02093428", "American_Staffordshire_terrier"], "181": ["n02093647", "Bedlington_terrier"], "182": ["n02093754", "Border_terrier"], "183": ["n02093859", "Kerry_blue_terrier"], "184": ["n02093991", "Irish_terrier"], "185": ["n02094114", "Norfolk_terrier"], "186": ["n02094258", "Norwich_terrier"], "187": ["n02094433", "Yorkshire_terrier"], "188": ["n02095314", "wire-haired_fox_terrier"], "189": ["n02095570", "Lakeland_terrier"], "190": ["n02095889", "Sealyham_terrier"], "191": ["n02096051", "Airedale"], "192": ["n02096177", "cairn"], "193": ["n02096294", "Australian_terrier"], "194": ["n02096437", "Dandie_Dinmont"], "195": ["n02096585", "Boston_bull"], "196": ["n02097047", "miniature_schnauzer"], "197": ["n02097130", "giant_schnauzer"], "198": ["n02097209", "standard_schnauzer"], "199": ["n02097298", "Scotch_terrier"], "200": ["n02097474", "Tibetan_terrier"], "201": ["n02097658", "silky_terrier"], "202": ["n02098105", "soft-coated_wheaten_terrier"], "203": ["n02098286", "West_Highland_white_terrier"], "204": ["n02098413", "Lhasa"], "205": ["n02099267", "flat-coated_retriever"], "206": ["n02099429", "curly-coated_retriever"], "207": ["n02099601", "golden_retriever"], "208": ["n02099712", "Labrador_retriever"], "209": ["n02099849", "Chesapeake_Bay_retriever"], "210": ["n02100236", "German_short-haired_pointer"], "211": ["n02100583", "vizsla"], "212": ["n02100735", "English_setter"], "213": ["n02100877", "Irish_setter"], "214": ["n02101006", "Gordon_setter"], "215": 
["n02101388", "Brittany_spaniel"], "216": ["n02101556", "clumber"], "217": ["n02102040", "English_springer"], "218": ["n02102177", "Welsh_springer_spaniel"], "219": ["n02102318", "cocker_spaniel"], "220": ["n02102480", "Sussex_spaniel"], "221": ["n02102973", "Irish_water_spaniel"], "222": ["n02104029", "kuvasz"], "223": ["n02104365", "schipperke"], "224": ["n02105056", "groenendael"], "225": ["n02105162", "malinois"], "226": ["n02105251", "briard"], "227": ["n02105412", "kelpie"], "228": ["n02105505", "komondor"], "229": ["n02105641", "Old_English_sheepdog"], "230": ["n02105855", "Shetland_sheepdog"], "231": ["n02106030", "collie"], "232": ["n02106166", "Border_collie"], "233": ["n02106382", "Bouvier_des_Flandres"], "234": ["n02106550", "Rottweiler"], "235": ["n02106662", "German_shepherd"], "236": ["n02107142", "Doberman"], "237": ["n02107312", "miniature_pinscher"], "238": ["n02107574", "Greater_Swiss_Mountain_dog"], "239": ["n02107683", "Bernese_mountain_dog"], "240": ["n02107908", "Appenzeller"], "241": ["n02108000", "EntleBucher"], "242": ["n02108089", "boxer"], "243": ["n02108422", "bull_mastiff"], "244": ["n02108551", "Tibetan_mastiff"], "245": ["n02108915", "French_bulldog"], "246": ["n02109047", "Great_Dane"], "247": ["n02109525", "Saint_Bernard"], "248": ["n02109961", "Eskimo_dog"], "249": ["n02110063", "malamute"], "250": ["n02110185", "Siberian_husky"], "251": ["n02110341", "dalmatian"], "252": ["n02110627", "affenpinscher"], "253": ["n02110806", "basenji"], "254": ["n02110958", "pug"], "255": ["n02111129", "Leonberg"], "256": ["n02111277", "Newfoundland"], "257": ["n02111500", "Great_Pyrenees"], "258": ["n02111889", "Samoyed"], "259": ["n02112018", "Pomeranian"], "260": ["n02112137", "chow"], "261": ["n02112350", "keeshond"], "262": ["n02112706", "Brabancon_griffon"], "263": ["n02113023", "Pembroke"], "264": ["n02113186", "Cardigan"], "265": ["n02113624", "toy_poodle"], "266": ["n02113712", "miniature_poodle"], "267": ["n02113799", "standard_poodle"], 
"268": ["n02113978", "Mexican_hairless"], "269": ["n02114367", "timber_wolf"], "270": ["n02114548", "white_wolf"], "271": ["n02114712", "red_wolf"], "272": ["n02114855", "coyote"], "273": ["n02115641", "dingo"], "274": ["n02115913", "dhole"], "275": ["n02116738", "African_hunting_dog"], "276": ["n02117135", "hyena"], "277": ["n02119022", "red_fox"], "278": ["n02119789", "kit_fox"], "279": ["n02120079", "Arctic_fox"], "280": ["n02120505", "grey_fox"], "281": ["n02123045", "tabby"], "282": ["n02123159", "tiger_cat"], "283": ["n02123394", "Persian_cat"], "284": ["n02123597", "Siamese_cat"], "285": ["n02124075", "Egyptian_cat"], "286": ["n02125311", "cougar"], "287": ["n02127052", "lynx"], "288": ["n02128385", "leopard"], "289": ["n02128757", "snow_leopard"], "290": ["n02128925", "jaguar"], "291": ["n02129165", "lion"], "292": ["n02129604", "tiger"], "293": ["n02130308", "cheetah"], "294": ["n02132136", "brown_bear"], "295": ["n02133161", "American_black_bear"], "296": ["n02134084", "ice_bear"], "297": ["n02134418", "sloth_bear"], "298": ["n02137549", "mongoose"], "299": ["n02138441", "meerkat"], "300": ["n02165105", "tiger_beetle"], "301": ["n02165456", "ladybug"], "302": ["n02167151", "ground_beetle"], "303": ["n02168699", "long-horned_beetle"], "304": ["n02169497", "leaf_beetle"], "305": ["n02172182", "dung_beetle"], "306": ["n02174001", "rhinoceros_beetle"], "307": ["n02177972", "weevil"], "308": ["n02190166", "fly"], "309": ["n02206856", "bee"], "310": ["n02219486", "ant"], "311": ["n02226429", "grasshopper"], "312": ["n02229544", "cricket"], "313": ["n02231487", "walking_stick"], "314": ["n02233338", "cockroach"], "315": ["n02236044", "mantis"], "316": ["n02256656", "cicada"], "317": ["n02259212", "leafhopper"], "318": ["n02264363", "lacewing"], "319": ["n02268443", "dragonfly"], "320": ["n02268853", "damselfly"], "321": ["n02276258", "admiral"], "322": ["n02277742", "ringlet"], "323": ["n02279972", "monarch"], "324": ["n02280649", "cabbage_butterfly"], "325": 
["n02281406", "sulphur_butterfly"], "326": ["n02281787", "lycaenid"], "327": ["n02317335", "starfish"], "328": ["n02319095", "sea_urchin"], "329": ["n02321529", "sea_cucumber"], "330": ["n02325366", "wood_rabbit"], "331": ["n02326432", "hare"], "332": ["n02328150", "Angora"], "333": ["n02342885", "hamster"], "334": ["n02346627", "porcupine"], "335": ["n02356798", "fox_squirrel"], "336": ["n02361337", "marmot"], "337": ["n02363005", "beaver"], "338": ["n02364673", "guinea_pig"], "339": ["n02389026", "sorrel"], "340": ["n02391049", "zebra"], "341": ["n02395406", "hog"], "342": ["n02396427", "wild_boar"], "343": ["n02397096", "warthog"], "344": ["n02398521", "hippopotamus"], "345": ["n02403003", "ox"], "346": ["n02408429", "water_buffalo"], "347": ["n02410509", "bison"], "348": ["n02412080", "ram"], "349": ["n02415577", "bighorn"], "350": ["n02417914", "ibex"], "351": ["n02422106", "hartebeest"], "352": ["n02422699", "impala"], "353": ["n02423022", "gazelle"], "354": ["n02437312", "Arabian_camel"], "355": ["n02437616", "llama"], "356": ["n02441942", "weasel"], "357": ["n02442845", "mink"], "358": ["n02443114", "polecat"], "359": ["n02443484", "black-footed_ferret"], "360": ["n02444819", "otter"], "361": ["n02445715", "skunk"], "362": ["n02447366", "badger"], "363": ["n02454379", "armadillo"], "364": ["n02457408", "three-toed_sloth"], "365": ["n02480495", "orangutan"], "366": ["n02480855", "gorilla"], "367": ["n02481823", "chimpanzee"], "368": ["n02483362", "gibbon"], "369": ["n02483708", "siamang"], "370": ["n02484975", "guenon"], "371": ["n02486261", "patas"], "372": ["n02486410", "baboon"], "373": ["n02487347", "macaque"], "374": ["n02488291", "langur"], "375": ["n02488702", "colobus"], "376": ["n02489166", "proboscis_monkey"], "377": ["n02490219", "marmoset"], "378": ["n02492035", "capuchin"], "379": ["n02492660", "howler_monkey"], "380": ["n02493509", "titi"], "381": ["n02493793", "spider_monkey"], "382": ["n02494079", "squirrel_monkey"], "383": ["n02497673", 
"Madagascar_cat"], "384": ["n02500267", "indri"], "385": ["n02504013", "Indian_elephant"], "386": ["n02504458", "African_elephant"], "387": ["n02509815", "lesser_panda"], "388": ["n02510455", "giant_panda"], "389": ["n02514041", "barracouta"], "390": ["n02526121", "eel"], "391": ["n02536864", "coho"], "392": ["n02606052", "rock_beauty"], "393": ["n02607072", "anemone_fish"], "394": ["n02640242", "sturgeon"], "395": ["n02641379", "gar"], "396": ["n02643566", "lionfish"], "397": ["n02655020", "puffer"], "398": ["n02666196", "abacus"], "399": ["n02667093", "abaya"], "400": ["n02669723", "academic_gown"], "401": ["n02672831", "accordion"], "402": ["n02676566", "acoustic_guitar"], "403": ["n02687172", "aircraft_carrier"], "404": ["n02690373", "airliner"], "405": ["n02692877", "airship"], "406": ["n02699494", "altar"], "407": ["n02701002", "ambulance"], "408": ["n02704792", "amphibian"], "409": ["n02708093", "analog_clock"], "410": ["n02727426", "apiary"], "411": ["n02730930", "apron"], "412": ["n02747177", "ashcan"], "413": ["n02749479", "assault_rifle"], "414": ["n02769748", "backpack"], "415": ["n02776631", "bakery"], "416": ["n02777292", "balance_beam"], "417": ["n02782093", "balloon"], "418": ["n02783161", "ballpoint"], "419": ["n02786058", "Band_Aid"], "420": ["n02787622", "banjo"], "421": ["n02788148", "bannister"], "422": ["n02790996", "barbell"], "423": ["n02791124", "barber_chair"], "424": ["n02791270", "barbershop"], "425": ["n02793495", "barn"], "426": ["n02794156", "barometer"], "427": ["n02795169", "barrel"], "428": ["n02797295", "barrow"], "429": ["n02799071", "baseball"], "430": ["n02802426", "basketball"], "431": ["n02804414", "bassinet"], "432": ["n02804610", "bassoon"], "433": ["n02807133", "bathing_cap"], "434": ["n02808304", "bath_towel"], "435": ["n02808440", "bathtub"], "436": ["n02814533", "beach_wagon"], "437": ["n02814860", "beacon"], "438": ["n02815834", "beaker"], "439": ["n02817516", "bearskin"], "440": ["n02823428", "beer_bottle"], "441": 
["n02823750", "beer_glass"], "442": ["n02825657", "bell_cote"], "443": ["n02834397", "bib"], "444": ["n02835271", "bicycle-built-for-two"], "445": ["n02837789", "bikini"], "446": ["n02840245", "binder"], "447": ["n02841315", "binoculars"], "448": ["n02843684", "birdhouse"], "449": ["n02859443", "boathouse"], "450": ["n02860847", "bobsled"], "451": ["n02865351", "bolo_tie"], "452": ["n02869837", "bonnet"], "453": ["n02870880", "bookcase"], "454": ["n02871525", "bookshop"], "455": ["n02877765", "bottlecap"], "456": ["n02879718", "bow"], "457": ["n02883205", "bow_tie"], "458": ["n02892201", "brass"], "459": ["n02892767", "brassiere"], "460": ["n02894605", "breakwater"], "461": ["n02895154", "breastplate"], "462": ["n02906734", "broom"], "463": ["n02909870", "bucket"], "464": ["n02910353", "buckle"], "465": ["n02916936", "bulletproof_vest"], "466": ["n02917067", "bullet_train"], "467": ["n02927161", "butcher_shop"], "468": ["n02930766", "cab"], "469": ["n02939185", "caldron"], "470": ["n02948072", "candle"], "471": ["n02950826", "cannon"], "472": ["n02951358", "canoe"], "473": ["n02951585", "can_opener"], "474": ["n02963159", "cardigan"], "475": ["n02965783", "car_mirror"], "476": ["n02966193", "carousel"], "477": ["n02966687", "carpenter's_kit"], "478": ["n02971356", "carton"], "479": ["n02974003", "car_wheel"], "480": ["n02977058", "cash_machine"], "481": ["n02978881", "cassette"], "482": ["n02979186", "cassette_player"], "483": ["n02980441", "castle"], "484": ["n02981792", "catamaran"], "485": ["n02988304", "CD_player"], "486": ["n02992211", "cello"], "487": ["n02992529", "cellular_telephone"], "488": ["n02999410", "chain"], "489": ["n03000134", "chainlink_fence"], "490": ["n03000247", "chain_mail"], "491": ["n03000684", "chain_saw"], "492": ["n03014705", "chest"], "493": ["n03016953", "chiffonier"], "494": ["n03017168", "chime"], "495": ["n03018349", "china_cabinet"], "496": ["n03026506", "Christmas_stocking"], "497": ["n03028079", "church"], "498": ["n03032252", 
"cinema"], "499": ["n03041632", "cleaver"], "500": ["n03042490", "cliff_dwelling"], "501": ["n03045698", "cloak"], "502": ["n03047690", "clog"], "503": ["n03062245", "cocktail_shaker"], "504": ["n03063599", "coffee_mug"], "505": ["n03063689", "coffeepot"], "506": ["n03065424", "coil"], "507": ["n03075370", "combination_lock"], "508": ["n03085013", "computer_keyboard"], "509": ["n03089624", "confectionery"], "510": ["n03095699", "container_ship"], "511": ["n03100240", "convertible"], "512": ["n03109150", "corkscrew"], "513": ["n03110669", "cornet"], "514": ["n03124043", "cowboy_boot"], "515": ["n03124170", "cowboy_hat"], "516": ["n03125729", "cradle"], "517": ["n03126707", "crane"], "518": ["n03127747", "crash_helmet"], "519": ["n03127925", "crate"], "520": ["n03131574", "crib"], "521": ["n03133878", "Crock_Pot"], "522": ["n03134739", "croquet_ball"], "523": ["n03141823", "crutch"], "524": ["n03146219", "cuirass"], "525": ["n03160309", "dam"], "526": ["n03179701", "desk"], "527": ["n03180011", "desktop_computer"], "528": ["n03187595", "dial_telephone"], "529": ["n03188531", "diaper"], "530": ["n03196217", "digital_clock"], "531": ["n03197337", "digital_watch"], "532": ["n03201208", "dining_table"], "533": ["n03207743", "dishrag"], "534": ["n03207941", "dishwasher"], "535": ["n03208938", "disk_brake"], "536": ["n03216828", "dock"], "537": ["n03218198", "dogsled"], "538": ["n03220513", "dome"], "539": ["n03223299", "doormat"], "540": ["n03240683", "drilling_platform"], "541": ["n03249569", "drum"], "542": ["n03250847", "drumstick"], "543": ["n03255030", "dumbbell"], "544": ["n03259280", "Dutch_oven"], "545": ["n03271574", "electric_fan"], "546": ["n03272010", "electric_guitar"], "547": ["n03272562", "electric_locomotive"], "548": ["n03290653", "entertainment_center"], "549": ["n03291819", "envelope"], "550": ["n03297495", "espresso_maker"], "551": ["n03314780", "face_powder"], "552": ["n03325584", "feather_boa"], "553": ["n03337140", "file"], "554": ["n03344393", 
"fireboat"], "555": ["n03345487", "fire_engine"], "556": ["n03347037", "fire_screen"], "557": ["n03355925", "flagpole"], "558": ["n03372029", "flute"], "559": ["n03376595", "folding_chair"], "560": ["n03379051", "football_helmet"], "561": ["n03384352", "forklift"], "562": ["n03388043", "fountain"], "563": ["n03388183", "fountain_pen"], "564": ["n03388549", "four-poster"], "565": ["n03393912", "freight_car"], "566": ["n03394916", "French_horn"], "567": ["n03400231", "frying_pan"], "568": ["n03404251", "fur_coat"], "569": ["n03417042", "garbage_truck"], "570": ["n03424325", "gasmask"], "571": ["n03425413", "gas_pump"], "572": ["n03443371", "goblet"], "573": ["n03444034", "go-kart"], "574": ["n03445777", "golf_ball"], "575": ["n03445924", "golfcart"], "576": ["n03447447", "gondola"], "577": ["n03447721", "gong"], "578": ["n03450230", "gown"], "579": ["n03452741", "grand_piano"], "580": ["n03457902", "greenhouse"], "581": ["n03459775", "grille"], "582": ["n03461385", "grocery_store"], "583": ["n03467068", "guillotine"], "584": ["n03476684", "hair_slide"], "585": ["n03476991", "hair_spray"], "586": ["n03478589", "half_track"], "587": ["n03481172", "hammer"], "588": ["n03482405", "hamper"], "589": ["n03483316", "hand_blower"], "590": ["n03485407", "hand-held_computer"], "591": ["n03485794", "handkerchief"], "592": ["n03492542", "hard_disc"], "593": ["n03494278", "harmonica"], "594": ["n03495258", "harp"], "595": ["n03496892", "harvester"], "596": ["n03498962", "hatchet"], "597": ["n03527444", "holster"], "598": ["n03529860", "home_theater"], "599": ["n03530642", "honeycomb"], "600": ["n03532672", "hook"], "601": ["n03534580", "hoopskirt"], "602": ["n03535780", "horizontal_bar"], "603": ["n03538406", "horse_cart"], "604": ["n03544143", "hourglass"], "605": ["n03584254", "iPod"], "606": ["n03584829", "iron"], "607": ["n03590841", "jack-o'-lantern"], "608": ["n03594734", "jean"], "609": ["n03594945", "jeep"], "610": ["n03595614", "jersey"], "611": ["n03598930", 
"jigsaw_puzzle"], "612": ["n03599486", "jinrikisha"], "613": ["n03602883", "joystick"], "614": ["n03617480", "kimono"], "615": ["n03623198", "knee_pad"], "616": ["n03627232", "knot"], "617": ["n03630383", "lab_coat"], "618": ["n03633091", "ladle"], "619": ["n03637318", "lampshade"], "620": ["n03642806", "laptop"], "621": ["n03649909", "lawn_mower"], "622": ["n03657121", "lens_cap"], "623": ["n03658185", "letter_opener"], "624": ["n03661043", "library"], "625": ["n03662601", "lifeboat"], "626": ["n03666591", "lighter"], "627": ["n03670208", "limousine"], "628": ["n03673027", "liner"], "629": ["n03676483", "lipstick"], "630": ["n03680355", "Loafer"], "631": ["n03690938", "lotion"], "632": ["n03691459", "loudspeaker"], "633": ["n03692522", "loupe"], "634": ["n03697007", "lumbermill"], "635": ["n03706229", "magnetic_compass"], "636": ["n03709823", "mailbag"], "637": ["n03710193", "mailbox"], "638": ["n03710637", "maillot"], "639": ["n03710721", "maillot"], "640": ["n03717622", "manhole_cover"], "641": ["n03720891", "maraca"], "642": ["n03721384", "marimba"], "643": ["n03724870", "mask"], "644": ["n03729826", "matchstick"], "645": ["n03733131", "maypole"], "646": ["n03733281", "maze"], "647": ["n03733805", "measuring_cup"], "648": ["n03742115", "medicine_chest"], "649": ["n03743016", "megalith"], "650": ["n03759954", "microphone"], "651": ["n03761084", "microwave"], "652": ["n03763968", "military_uniform"], "653": ["n03764736", "milk_can"], "654": ["n03769881", "minibus"], "655": ["n03770439", "miniskirt"], "656": ["n03770679", "minivan"], "657": ["n03773504", "missile"], "658": ["n03775071", "mitten"], "659": ["n03775546", "mixing_bowl"], "660": ["n03776460", "mobile_home"], "661": ["n03777568", "Model_T"], "662": ["n03777754", "modem"], "663": ["n03781244", "monastery"], "664": ["n03782006", "monitor"], "665": ["n03785016", "moped"], "666": ["n03786901", "mortar"], "667": ["n03787032", "mortarboard"], "668": ["n03788195", "mosque"], "669": ["n03788365", 
"mosquito_net"], "670": ["n03791053", "motor_scooter"], "671": ["n03792782", "mountain_bike"], "672": ["n03792972", "mountain_tent"], "673": ["n03793489", "mouse"], "674": ["n03794056", "mousetrap"], "675": ["n03796401", "moving_van"], "676": ["n03803284", "muzzle"], "677": ["n03804744", "nail"], "678": ["n03814639", "neck_brace"], "679": ["n03814906", "necklace"], "680": ["n03825788", "nipple"], "681": ["n03832673", "notebook"], "682": ["n03837869", "obelisk"], "683": ["n03838899", "oboe"], "684": ["n03840681", "ocarina"], "685": ["n03841143", "odometer"], "686": ["n03843555", "oil_filter"], "687": ["n03854065", "organ"], "688": ["n03857828", "oscilloscope"], "689": ["n03866082", "overskirt"], "690": ["n03868242", "oxcart"], "691": ["n03868863", "oxygen_mask"], "692": ["n03871628", "packet"], "693": ["n03873416", "paddle"], "694": ["n03874293", "paddlewheel"], "695": ["n03874599", "padlock"], "696": ["n03876231", "paintbrush"], "697": ["n03877472", "pajama"], "698": ["n03877845", "palace"], "699": ["n03884397", "panpipe"], "700": ["n03887697", "paper_towel"], "701": ["n03888257", "parachute"], "702": ["n03888605", "parallel_bars"], "703": ["n03891251", "park_bench"], "704": ["n03891332", "parking_meter"], "705": ["n03895866", "passenger_car"], "706": ["n03899768", "patio"], "707": ["n03902125", "pay-phone"], "708": ["n03903868", "pedestal"], "709": ["n03908618", "pencil_box"], "710": ["n03908714", "pencil_sharpener"], "711": ["n03916031", "perfume"], "712": ["n03920288", "Petri_dish"], "713": ["n03924679", "photocopier"], "714": ["n03929660", "pick"], "715": ["n03929855", "pickelhaube"], "716": ["n03930313", "picket_fence"], "717": ["n03930630", "pickup"], "718": ["n03933933", "pier"], "719": ["n03935335", "piggy_bank"], "720": ["n03937543", "pill_bottle"], "721": ["n03938244", "pillow"], "722": ["n03942813", "ping-pong_ball"], "723": ["n03944341", "pinwheel"], "724": ["n03947888", "pirate"], "725": ["n03950228", "pitcher"], "726": ["n03954731", "plane"], "727": 
["n03956157", "planetarium"], "728": ["n03958227", "plastic_bag"], "729": ["n03961711", "plate_rack"], "730": ["n03967562", "plow"], "731": ["n03970156", "plunger"], "732": ["n03976467", "Polaroid_camera"], "733": ["n03976657", "pole"], "734": ["n03977966", "police_van"], "735": ["n03980874", "poncho"], "736": ["n03982430", "pool_table"], "737": ["n03983396", "pop_bottle"], "738": ["n03991062", "pot"], "739": ["n03992509", "potter's_wheel"], "740": ["n03995372", "power_drill"], "741": ["n03998194", "prayer_rug"], "742": ["n04004767", "printer"], "743": ["n04005630", "prison"], "744": ["n04008634", "projectile"], "745": ["n04009552", "projector"], "746": ["n04019541", "puck"], "747": ["n04023962", "punching_bag"], "748": ["n04026417", "purse"], "749": ["n04033901", "quill"], "750": ["n04033995", "quilt"], "751": ["n04037443", "racer"], "752": ["n04039381", "racket"], "753": ["n04040759", "radiator"], "754": ["n04041544", "radio"], "755": ["n04044716", "radio_telescope"], "756": ["n04049303", "rain_barrel"], "757": ["n04065272", "recreational_vehicle"], "758": ["n04067472", "reel"], "759": ["n04069434", "reflex_camera"], "760": ["n04070727", "refrigerator"], "761": ["n04074963", "remote_control"], "762": ["n04081281", "restaurant"], "763": ["n04086273", "revolver"], "764": ["n04090263", "rifle"], "765": ["n04099969", "rocking_chair"], "766": ["n04111531", "rotisserie"], "767": ["n04116512", "rubber_eraser"], "768": ["n04118538", "rugby_ball"], "769": ["n04118776", "rule"], "770": ["n04120489", "running_shoe"], "771": ["n04125021", "safe"], "772": ["n04127249", "safety_pin"], "773": ["n04131690", "saltshaker"], "774": ["n04133789", "sandal"], "775": ["n04136333", "sarong"], "776": ["n04141076", "sax"], "777": ["n04141327", "scabbard"], "778": ["n04141975", "scale"], "779": ["n04146614", "school_bus"], "780": ["n04147183", "schooner"], "781": ["n04149813", "scoreboard"], "782": ["n04152593", "screen"], "783": ["n04153751", "screw"], "784": ["n04154565", "screwdriver"], 
"785": ["n04162706", "seat_belt"], "786": ["n04179913", "sewing_machine"], "787": ["n04192698", "shield"], "788": ["n04200800", "shoe_shop"], "789": ["n04201297", "shoji"], "790": ["n04204238", "shopping_basket"], "791": ["n04204347", "shopping_cart"], "792": ["n04208210", "shovel"], "793": ["n04209133", "shower_cap"], "794": ["n04209239", "shower_curtain"], "795": ["n04228054", "ski"], "796": ["n04229816", "ski_mask"], "797": ["n04235860", "sleeping_bag"], "798": ["n04238763", "slide_rule"], "799": ["n04239074", "sliding_door"], "800": ["n04243546", "slot"], "801": ["n04251144", "snorkel"], "802": ["n04252077", "snowmobile"], "803": ["n04252225", "snowplow"], "804": ["n04254120", "soap_dispenser"], "805": ["n04254680", "soccer_ball"], "806": ["n04254777", "sock"], "807": ["n04258138", "solar_dish"], "808": ["n04259630", "sombrero"], "809": ["n04263257", "soup_bowl"], "810": ["n04264628", "space_bar"], "811": ["n04265275", "space_heater"], "812": ["n04266014", "space_shuttle"], "813": ["n04270147", "spatula"], "814": ["n04273569", "speedboat"], "815": ["n04275548", "spider_web"], "816": ["n04277352", "spindle"], "817": ["n04285008", "sports_car"], "818": ["n04286575", "spotlight"], "819": ["n04296562", "stage"], "820": ["n04310018", "steam_locomotive"], "821": ["n04311004", "steel_arch_bridge"], "822": ["n04311174", "steel_drum"], "823": ["n04317175", "stethoscope"], "824": ["n04325704", "stole"], "825": ["n04326547", "stone_wall"], "826": ["n04328186", "stopwatch"], "827": ["n04330267", "stove"], "828": ["n04332243", "strainer"], "829": ["n04335435", "streetcar"], "830": ["n04336792", "stretcher"], "831": ["n04344873", "studio_couch"], "832": ["n04346328", "stupa"], "833": ["n04347754", "submarine"], "834": ["n04350905", "suit"], "835": ["n04355338", "sundial"], "836": ["n04355933", "sunglass"], "837": ["n04356056", "sunglasses"], "838": ["n04357314", "sunscreen"], "839": ["n04366367", "suspension_bridge"], "840": ["n04367480", "swab"], "841": ["n04370456", 
"sweatshirt"], "842": ["n04371430", "swimming_trunks"], "843": ["n04371774", "swing"], "844": ["n04372370", "switch"], "845": ["n04376876", "syringe"], "846": ["n04380533", "table_lamp"], "847": ["n04389033", "tank"], "848": ["n04392985", "tape_player"], "849": ["n04398044", "teapot"], "850": ["n04399382", "teddy"], "851": ["n04404412", "television"], "852": ["n04409515", "tennis_ball"], "853": ["n04417672", "thatch"], "854": ["n04418357", "theater_curtain"], "855": ["n04423845", "thimble"], "856": ["n04428191", "thresher"], "857": ["n04429376", "throne"], "858": ["n04435653", "tile_roof"], "859": ["n04442312", "toaster"], "860": ["n04443257", "tobacco_shop"], "861": ["n04447861", "toilet_seat"], "862": ["n04456115", "torch"], "863": ["n04458633", "totem_pole"], "864": ["n04461696", "tow_truck"], "865": ["n04462240", "toyshop"], "866": ["n04465501", "tractor"], "867": ["n04467665", "trailer_truck"], "868": ["n04476259", "tray"], "869": ["n04479046", "trench_coat"], "870": ["n04482393", "tricycle"], "871": ["n04483307", "trimaran"], "872": ["n04485082", "tripod"], "873": ["n04486054", "triumphal_arch"], "874": ["n04487081", "trolleybus"], "875": ["n04487394", "trombone"], "876": ["n04493381", "tub"], "877": ["n04501370", "turnstile"], "878": ["n04505470", "typewriter_keyboard"], "879": ["n04507155", "umbrella"], "880": ["n04509417", "unicycle"], "881": ["n04515003", "upright"], "882": ["n04517823", "vacuum"], "883": ["n04522168", "vase"], "884": ["n04523525", "vault"], "885": ["n04525038", "velvet"], "886": ["n04525305", "vending_machine"], "887": ["n04532106", "vestment"], "888": ["n04532670", "viaduct"], "889": ["n04536866", "violin"], "890": ["n04540053", "volleyball"], "891": ["n04542943", "waffle_iron"], "892": ["n04548280", "wall_clock"], "893": ["n04548362", "wallet"], "894": ["n04550184", "wardrobe"], "895": ["n04552348", "warplane"], "896": ["n04553703", "washbasin"], "897": ["n04554684", "washer"], "898": ["n04557648", "water_bottle"], "899": ["n04560804", 
"water_jug"], "900": ["n04562935", "water_tower"], "901": ["n04579145", "whiskey_jug"], "902": ["n04579432", "whistle"], "903": ["n04584207", "wig"], "904": ["n04589890", "window_screen"], "905": ["n04590129", "window_shade"], "906": ["n04591157", "Windsor_tie"], "907": ["n04591713", "wine_bottle"], "908": ["n04592741", "wing"], "909": ["n04596742", "wok"], "910": ["n04597913", "wooden_spoon"], "911": ["n04599235", "wool"], "912": ["n04604644", "worm_fence"], "913": ["n04606251", "wreck"], "914": ["n04612504", "yawl"], "915": ["n04613696", "yurt"], "916": ["n06359193", "web_site"], "917": ["n06596364", "comic_book"], "918": ["n06785654", "crossword_puzzle"], "919": ["n06794110", "street_sign"], "920": ["n06874185", "traffic_light"], "921": ["n07248320", "book_jacket"], "922": ["n07565083", "menu"], "923": ["n07579787", "plate"], "924": ["n07583066", "guacamole"], "925": ["n07584110", "consomme"], "926": ["n07590611", "hot_pot"], "927": ["n07613480", "trifle"], "928": ["n07614500", "ice_cream"], "929": ["n07615774", "ice_lolly"], "930": ["n07684084", "French_loaf"], "931": ["n07693725", "bagel"], "932": ["n07695742", "pretzel"], "933": ["n07697313", "cheeseburger"], "934": ["n07697537", "hotdog"], "935": ["n07711569", "mashed_potato"], "936": ["n07714571", "head_cabbage"], "937": ["n07714990", "broccoli"], "938": ["n07715103", "cauliflower"], "939": ["n07716358", "zucchini"], "940": ["n07716906", "spaghetti_squash"], "941": ["n07717410", "acorn_squash"], "942": ["n07717556", "butternut_squash"], "943": ["n07718472", "cucumber"], "944": ["n07718747", "artichoke"], "945": ["n07720875", "bell_pepper"], "946": ["n07730033", "cardoon"], "947": ["n07734744", "mushroom"], "948": ["n07742313", "Granny_Smith"], "949": ["n07745940", "strawberry"], "950": ["n07747607", "orange"], "951": ["n07749582", "lemon"], "952": ["n07753113", "fig"], "953": ["n07753275", "pineapple"], "954": ["n07753592", "banana"], "955": ["n07754684", "jackfruit"], "956": ["n07760859", "custard_apple"], 
"957": ["n07768694", "pomegranate"], "958": ["n07802026", "hay"], "959": ["n07831146", "carbonara"], "960": ["n07836838", "chocolate_sauce"], "961": ["n07860988", "dough"], "962": ["n07871810", "meat_loaf"], "963": ["n07873807", "pizza"], "964": ["n07875152", "potpie"], "965": ["n07880968", "burrito"], "966": ["n07892512", "red_wine"], "967": ["n07920052", "espresso"], "968": ["n07930864", "cup"], "969": ["n07932039", "eggnog"], "970": ["n09193705", "alp"], "971": ["n09229709", "bubble"], "972": ["n09246464", "cliff"], "973": ["n09256479", "coral_reef"], "974": ["n09288635", "geyser"], "975": ["n09332890", "lakeside"], "976": ["n09399592", "promontory"], "977": ["n09421951", "sandbar"], "978": ["n09428293", "seashore"], "979": ["n09468604", "valley"], "980": ["n09472597", "volcano"], "981": ["n09835506", "ballplayer"], "982": ["n10148035", "groom"], "983": ["n10565667", "scuba_diver"], "984": ["n11879895", "rapeseed"], "985": ["n11939491", "daisy"], "986": ["n12057211", "yellow_lady's_slipper"], "987": ["n12144580", "corn"], "988": ["n12267677", "acorn"], "989": ["n12620546", "hip"], "990": ["n12768682", "buckeye"], "991": ["n12985857", "coral_fungus"], "992": ["n12998815", "agaric"], "993": ["n13037406", "gyromitra"], "994": ["n13040303", "stinkhorn"], "995": ["n13044778", "earthstar"], "996": ["n13052670", "hen-of-the-woods"], "997": ["n13054560", "bolete"], "998": ["n13133613", "ear"], "999": ["n15075141", "toilet_tissue"]}
\ No newline at end of file
diff --git a/_static/images/microsoft-logo.svg b/_static/images/microsoft-logo.svg
new file mode 100644
index 00000000000..a1a7ce2d7a7
--- /dev/null
+++ b/_static/images/microsoft-logo.svg
@@ -0,0 +1,80 @@
+
+
+
+
+
+
diff --git a/_static/img/8_workers.png b/_static/img/8_workers.png
new file mode 100644
index 00000000000..9a51182eb4b
Binary files /dev/null and b/_static/img/8_workers.png differ
diff --git a/_static/img/ExecuTorch-Logo-cropped.svg b/_static/img/ExecuTorch-Logo-cropped.svg
new file mode 100644
index 00000000000..9e0ef52fbd8
--- /dev/null
+++ b/_static/img/ExecuTorch-Logo-cropped.svg
@@ -0,0 +1,57 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/_static/img/audio_preprocessing_tutorial_waveform.png b/_static/img/audio_preprocessing_tutorial_waveform.png
new file mode 100644
index 00000000000..320b53c996d
Binary files /dev/null and b/_static/img/audio_preprocessing_tutorial_waveform.png differ
diff --git a/_static/img/ax_logo.png b/_static/img/ax_logo.png
new file mode 100644
index 00000000000..ecb4302b524
Binary files /dev/null and b/_static/img/ax_logo.png differ
diff --git a/_static/img/ax_scheduler_illustration.png b/_static/img/ax_scheduler_illustration.png
new file mode 100644
index 00000000000..65e5a004a1b
Binary files /dev/null and b/_static/img/ax_scheduler_illustration.png differ
diff --git a/_static/img/basics/comp-graph.png b/_static/img/basics/comp-graph.png
new file mode 100644
index 00000000000..cfa6163d58a
Binary files /dev/null and b/_static/img/basics/comp-graph.png differ
diff --git a/_static/img/basics/fashion_mnist.png b/_static/img/basics/fashion_mnist.png
new file mode 100644
index 00000000000..213b1e1f17b
Binary files /dev/null and b/_static/img/basics/fashion_mnist.png differ
diff --git a/_static/img/basics/optimizationloops.png b/_static/img/basics/optimizationloops.png
new file mode 100644
index 00000000000..c43d83f2799
Binary files /dev/null and b/_static/img/basics/optimizationloops.png differ
diff --git a/_static/img/basics/typesdata.png b/_static/img/basics/typesdata.png
new file mode 100644
index 00000000000..5d0e0291eef
Binary files /dev/null and b/_static/img/basics/typesdata.png differ
diff --git a/_static/img/bert_mrpc.png b/_static/img/bert_mrpc.png
new file mode 100644
index 00000000000..fb0ff796f79
Binary files /dev/null and b/_static/img/bert_mrpc.png differ
diff --git a/_static/img/cat_224x224.jpg b/_static/img/cat_224x224.jpg
deleted file mode 100755
index 05660ce53f9..00000000000
Binary files a/_static/img/cat_224x224.jpg and /dev/null differ
diff --git a/_static/img/cat_output1.png b/_static/img/cat_output1.png
deleted file mode 100755
index 92694433024..00000000000
Binary files a/_static/img/cat_output1.png and /dev/null differ
diff --git a/_static/img/channels_last_memory_format.png b/_static/img/channels_last_memory_format.png
new file mode 100644
index 00000000000..d2b2922023f
Binary files /dev/null and b/_static/img/channels_last_memory_format.png differ
diff --git a/_static/img/chatbot/diff.png b/_static/img/chatbot/diff.png
old mode 100755
new mode 100644
index 525c426679d..fc3cc56789b
Binary files a/_static/img/chatbot/diff.png and b/_static/img/chatbot/diff.png differ
diff --git a/_static/img/chatbot/pytorch_workflow.png b/_static/img/chatbot/pytorch_workflow.png
old mode 100755
new mode 100644
index 1598490ddad..8a81f1995f0
Binary files a/_static/img/chatbot/pytorch_workflow.png and b/_static/img/chatbot/pytorch_workflow.png differ
diff --git a/_static/img/classic_memory_format.png b/_static/img/classic_memory_format.png
new file mode 100644
index 00000000000..65cff010d88
Binary files /dev/null and b/_static/img/classic_memory_format.png differ
diff --git a/_static/img/compiled_autograd/call_hook_node.png b/_static/img/compiled_autograd/call_hook_node.png
new file mode 100644
index 00000000000..3e094cf6f73
Binary files /dev/null and b/_static/img/compiled_autograd/call_hook_node.png differ
diff --git a/_static/img/compiled_autograd/entire_verbose_log.png b/_static/img/compiled_autograd/entire_verbose_log.png
new file mode 100644
index 00000000000..4ce2b8538ee
Binary files /dev/null and b/_static/img/compiled_autograd/entire_verbose_log.png differ
diff --git a/_static/img/compiled_autograd/recompile_due_to_dynamic.png b/_static/img/compiled_autograd/recompile_due_to_dynamic.png
new file mode 100644
index 00000000000..41ae56acf2d
Binary files /dev/null and b/_static/img/compiled_autograd/recompile_due_to_dynamic.png differ
diff --git a/_static/img/compiled_autograd/recompile_due_to_node.png b/_static/img/compiled_autograd/recompile_due_to_node.png
new file mode 100644
index 00000000000..800a1784587
Binary files /dev/null and b/_static/img/compiled_autograd/recompile_due_to_node.png differ
diff --git a/_static/img/dag_autograd.png b/_static/img/dag_autograd.png
new file mode 100644
index 00000000000..cdc50fed625
Binary files /dev/null and b/_static/img/dag_autograd.png differ
diff --git a/_static/img/deeplabv3_android.png b/_static/img/deeplabv3_android.png
new file mode 100644
index 00000000000..e0a451be8ef
Binary files /dev/null and b/_static/img/deeplabv3_android.png differ
diff --git a/_static/img/deeplabv3_android2.png b/_static/img/deeplabv3_android2.png
new file mode 100644
index 00000000000..0ae041479aa
Binary files /dev/null and b/_static/img/deeplabv3_android2.png differ
diff --git a/_static/img/deeplabv3_ios.png b/_static/img/deeplabv3_ios.png
new file mode 100644
index 00000000000..c901179e1ee
Binary files /dev/null and b/_static/img/deeplabv3_ios.png differ
diff --git a/_static/img/deeplabv3_ios2.png b/_static/img/deeplabv3_ios2.png
new file mode 100644
index 00000000000..3dc0073ca13
Binary files /dev/null and b/_static/img/deeplabv3_ios2.png differ
diff --git a/_static/img/distributed/DDP_memory.gif b/_static/img/distributed/DDP_memory.gif
new file mode 100644
index 00000000000..4049b9dba43
Binary files /dev/null and b/_static/img/distributed/DDP_memory.gif differ
diff --git a/_static/img/distributed/FSDP_autowrap.gif b/_static/img/distributed/FSDP_autowrap.gif
new file mode 100644
index 00000000000..d9e782d4c95
Binary files /dev/null and b/_static/img/distributed/FSDP_autowrap.gif differ
diff --git a/_static/img/distributed/FSDP_memory.gif b/_static/img/distributed/FSDP_memory.gif
new file mode 100644
index 00000000000..aece4e4b8a0
Binary files /dev/null and b/_static/img/distributed/FSDP_memory.gif differ
diff --git a/_static/img/distributed/device_mesh.png b/_static/img/distributed/device_mesh.png
new file mode 100644
index 00000000000..2ccabcc4824
Binary files /dev/null and b/_static/img/distributed/device_mesh.png differ
diff --git a/_static/img/distributed/distributed_checkpoint_generated_files.png b/_static/img/distributed/distributed_checkpoint_generated_files.png
new file mode 100644
index 00000000000..b32dddb7e62
Binary files /dev/null and b/_static/img/distributed/distributed_checkpoint_generated_files.png differ
diff --git a/_static/img/distributed/fsdp_implicit.png b/_static/img/distributed/fsdp_implicit.png
new file mode 100644
index 00000000000..85b19b7e72e
Binary files /dev/null and b/_static/img/distributed/fsdp_implicit.png differ
diff --git a/_static/img/distributed/fsdp_sharding.png b/_static/img/distributed/fsdp_sharding.png
new file mode 100755
index 00000000000..9dd1e3c111e
Binary files /dev/null and b/_static/img/distributed/fsdp_sharding.png differ
diff --git a/_static/img/distributed/fsdp_tp.png b/_static/img/distributed/fsdp_tp.png
new file mode 100644
index 00000000000..e419304ac7d
Binary files /dev/null and b/_static/img/distributed/fsdp_tp.png differ
diff --git a/_static/img/distributed/fsdp_workflow.png b/_static/img/distributed/fsdp_workflow.png
new file mode 100644
index 00000000000..1a8df0e44b6
Binary files /dev/null and b/_static/img/distributed/fsdp_workflow.png differ
diff --git a/_static/img/distributed/loss_parallel.png b/_static/img/distributed/loss_parallel.png
new file mode 100644
index 00000000000..b5cf9a499bc
Binary files /dev/null and b/_static/img/distributed/loss_parallel.png differ
diff --git a/_static/img/distributed/megatron_lm.png b/_static/img/distributed/megatron_lm.png
new file mode 100644
index 00000000000..38f7b06639f
Binary files /dev/null and b/_static/img/distributed/megatron_lm.png differ
diff --git a/_static/img/distributed/tcpstore_barrier_time.png b/_static/img/distributed/tcpstore_barrier_time.png
new file mode 100644
index 00000000000..5ece3a7471d
Binary files /dev/null and b/_static/img/distributed/tcpstore_barrier_time.png differ
diff --git a/_static/img/distributed/tcpstore_init_time.png b/_static/img/distributed/tcpstore_init_time.png
new file mode 100644
index 00000000000..df514b4dc48
Binary files /dev/null and b/_static/img/distributed/tcpstore_init_time.png differ
diff --git a/_static/img/flask.png b/_static/img/flask.png
new file mode 100644
index 00000000000..bad6738efdd
Binary files /dev/null and b/_static/img/flask.png differ
diff --git a/_static/img/half_cheetah.gif b/_static/img/half_cheetah.gif
new file mode 100644
index 00000000000..b61ff47d4e6
Binary files /dev/null and b/_static/img/half_cheetah.gif differ
diff --git a/_static/img/hta/comm_across_ranks.png b/_static/img/hta/comm_across_ranks.png
new file mode 100644
index 00000000000..2336de3bcbc
Binary files /dev/null and b/_static/img/hta/comm_across_ranks.png differ
diff --git a/_static/img/hta/counts_diff.png b/_static/img/hta/counts_diff.png
new file mode 100644
index 00000000000..34575c145de
Binary files /dev/null and b/_static/img/hta/counts_diff.png differ
diff --git a/_static/img/hta/cuda_kernel_launch.png b/_static/img/hta/cuda_kernel_launch.png
new file mode 100644
index 00000000000..e57c54a2fc5
Binary files /dev/null and b/_static/img/hta/cuda_kernel_launch.png differ
diff --git a/_static/img/hta/cuda_kernel_launch_stats.png b/_static/img/hta/cuda_kernel_launch_stats.png
new file mode 100644
index 00000000000..33a160fc752
Binary files /dev/null and b/_static/img/hta/cuda_kernel_launch_stats.png differ
diff --git a/_static/img/hta/duration_diff.png b/_static/img/hta/duration_diff.png
new file mode 100644
index 00000000000..050d491c872
Binary files /dev/null and b/_static/img/hta/duration_diff.png differ
diff --git a/_static/img/hta/idle_time.png b/_static/img/hta/idle_time.png
new file mode 100644
index 00000000000..782bfe9adb5
Binary files /dev/null and b/_static/img/hta/idle_time.png differ
diff --git a/_static/img/hta/idle_time_breakdown_percentage.png b/_static/img/hta/idle_time_breakdown_percentage.png
new file mode 100644
index 00000000000..3bab5946eab
Binary files /dev/null and b/_static/img/hta/idle_time_breakdown_percentage.png differ
diff --git a/_static/img/hta/idle_time_summary.png b/_static/img/hta/idle_time_summary.png
new file mode 100644
index 00000000000..101b696b534
Binary files /dev/null and b/_static/img/hta/idle_time_summary.png differ
diff --git a/_static/img/hta/kernel_metrics_df.png b/_static/img/hta/kernel_metrics_df.png
new file mode 100644
index 00000000000..53eefb58b0c
Binary files /dev/null and b/_static/img/hta/kernel_metrics_df.png differ
diff --git a/_static/img/hta/kernel_type_breakdown.png b/_static/img/hta/kernel_type_breakdown.png
new file mode 100644
index 00000000000..29a29cf89b2
Binary files /dev/null and b/_static/img/hta/kernel_type_breakdown.png differ
diff --git a/_static/img/hta/launch_delay_outliers.png b/_static/img/hta/launch_delay_outliers.png
new file mode 100644
index 00000000000..9bb455adea4
Binary files /dev/null and b/_static/img/hta/launch_delay_outliers.png differ
diff --git a/_static/img/hta/mem_bandwidth_queue_length.png b/_static/img/hta/mem_bandwidth_queue_length.png
new file mode 100644
index 00000000000..9df5383b5d9
Binary files /dev/null and b/_static/img/hta/mem_bandwidth_queue_length.png differ
diff --git a/_static/img/hta/overlap_df.png b/_static/img/hta/overlap_df.png
new file mode 100644
index 00000000000..ef164a28a12
Binary files /dev/null and b/_static/img/hta/overlap_df.png differ
diff --git a/_static/img/hta/overlap_plot.png b/_static/img/hta/overlap_plot.png
new file mode 100644
index 00000000000..acd449bc7ff
Binary files /dev/null and b/_static/img/hta/overlap_plot.png differ
diff --git a/_static/img/hta/pie_charts.png b/_static/img/hta/pie_charts.png
new file mode 100644
index 00000000000..fa9137109a6
Binary files /dev/null and b/_static/img/hta/pie_charts.png differ
diff --git a/_static/img/hta/queue_length_summary.png b/_static/img/hta/queue_length_summary.png
new file mode 100644
index 00000000000..639a03fb6d1
Binary files /dev/null and b/_static/img/hta/queue_length_summary.png differ
diff --git a/_static/img/hta/runtime_outliers.png b/_static/img/hta/runtime_outliers.png
new file mode 100644
index 00000000000..1e2dfff9006
Binary files /dev/null and b/_static/img/hta/runtime_outliers.png differ
diff --git a/_static/img/hta/short_gpu_kernels.png b/_static/img/hta/short_gpu_kernels.png
new file mode 100644
index 00000000000..ff382a3a7f0
Binary files /dev/null and b/_static/img/hta/short_gpu_kernels.png differ
diff --git a/_static/img/hta/temporal_breakdown_df.png b/_static/img/hta/temporal_breakdown_df.png
new file mode 100644
index 00000000000..dce1829d113
Binary files /dev/null and b/_static/img/hta/temporal_breakdown_df.png differ
diff --git a/_static/img/hta/temporal_breakdown_plot.png b/_static/img/hta/temporal_breakdown_plot.png
new file mode 100644
index 00000000000..9c5f45c1d35
Binary files /dev/null and b/_static/img/hta/temporal_breakdown_plot.png differ
diff --git a/_static/img/hybrid.png b/_static/img/hybrid.png
deleted file mode 100755
index d4dc016d863..00000000000
Binary files a/_static/img/hybrid.png and /dev/null differ
diff --git a/_static/img/install_msvc.png b/_static/img/install_msvc.png
new file mode 100644
index 00000000000..fce73207a80
Binary files /dev/null and b/_static/img/install_msvc.png differ
diff --git a/_static/img/invpendulum.gif b/_static/img/invpendulum.gif
new file mode 100644
index 00000000000..3102c5b55cf
Binary files /dev/null and b/_static/img/invpendulum.gif differ
diff --git a/_static/img/itt_tutorial/vtune_config.png b/_static/img/itt_tutorial/vtune_config.png
new file mode 100755
index 00000000000..9f3c4605022
Binary files /dev/null and b/_static/img/itt_tutorial/vtune_config.png differ
diff --git a/_static/img/itt_tutorial/vtune_start.png b/_static/img/itt_tutorial/vtune_start.png
new file mode 100755
index 00000000000..9460df7c5f3
Binary files /dev/null and b/_static/img/itt_tutorial/vtune_start.png differ
diff --git a/_static/img/itt_tutorial/vtune_timeline.png b/_static/img/itt_tutorial/vtune_timeline.png
new file mode 100755
index 00000000000..1f1f018e3fa
Binary files /dev/null and b/_static/img/itt_tutorial/vtune_timeline.png differ
diff --git a/_static/img/itt_tutorial/vtune_xpu_config.png b/_static/img/itt_tutorial/vtune_xpu_config.png
new file mode 100644
index 00000000000..80dd1812d26
Binary files /dev/null and b/_static/img/itt_tutorial/vtune_xpu_config.png differ
diff --git a/_static/img/itt_tutorial/vtune_xpu_timeline.png b/_static/img/itt_tutorial/vtune_xpu_timeline.png
new file mode 100644
index 00000000000..43818cf105c
Binary files /dev/null and b/_static/img/itt_tutorial/vtune_xpu_timeline.png differ
diff --git a/_static/img/knowledge_distillation/ce_only.png b/_static/img/knowledge_distillation/ce_only.png
new file mode 100644
index 00000000000..a7503716575
Binary files /dev/null and b/_static/img/knowledge_distillation/ce_only.png differ
diff --git a/_static/img/knowledge_distillation/cosine_embedding_loss.png b/_static/img/knowledge_distillation/cosine_embedding_loss.png
new file mode 100644
index 00000000000..ebfd957a250
Binary files /dev/null and b/_static/img/knowledge_distillation/cosine_embedding_loss.png differ
diff --git a/_static/img/knowledge_distillation/cosine_loss_distillation.png b/_static/img/knowledge_distillation/cosine_loss_distillation.png
new file mode 100644
index 00000000000..81f241eb07f
Binary files /dev/null and b/_static/img/knowledge_distillation/cosine_loss_distillation.png differ
diff --git a/_static/img/knowledge_distillation/distillation_output_loss.png b/_static/img/knowledge_distillation/distillation_output_loss.png
new file mode 100644
index 00000000000..f86cbddbdfd
Binary files /dev/null and b/_static/img/knowledge_distillation/distillation_output_loss.png differ
diff --git a/_static/img/knowledge_distillation/fitnets_knowledge_distill.png b/_static/img/knowledge_distillation/fitnets_knowledge_distill.png
new file mode 100644
index 00000000000..407d9de89f6
Binary files /dev/null and b/_static/img/knowledge_distillation/fitnets_knowledge_distill.png differ
diff --git a/_static/img/mario.gif b/_static/img/mario.gif
new file mode 100644
index 00000000000..95d8c0cb172
Binary files /dev/null and b/_static/img/mario.gif differ
diff --git a/_static/img/mario_env.png b/_static/img/mario_env.png
new file mode 100644
index 00000000000..b6fc09c3c8c
Binary files /dev/null and b/_static/img/mario_env.png differ
diff --git a/_static/img/memory_format_logo.png b/_static/img/memory_format_logo.png
new file mode 100644
index 00000000000..6d1043ed29a
Binary files /dev/null and b/_static/img/memory_format_logo.png differ
diff --git a/_static/img/mnist.png b/_static/img/mnist.png
old mode 100755
new mode 100644
index a85fc423984..53c876a89d5
Binary files a/_static/img/mnist.png and b/_static/img/mnist.png differ
diff --git a/_static/img/named_tensor.png b/_static/img/named_tensor.png
new file mode 100644
index 00000000000..2efceb9f516
Binary files /dev/null and b/_static/img/named_tensor.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_transformer_block.png b/_static/img/nvfuser_intro/nvfuser_transformer_block.png
new file mode 100755
index 00000000000..8dd88bbdf94
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_transformer_block.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_0.png b/_static/img/nvfuser_intro/nvfuser_tutorial_0.png
new file mode 100755
index 00000000000..d3448d192bc
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_0.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_1.png b/_static/img/nvfuser_intro/nvfuser_tutorial_1.png
new file mode 100755
index 00000000000..4752695fa91
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_1.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_2.png b/_static/img/nvfuser_intro/nvfuser_tutorial_2.png
new file mode 100755
index 00000000000..ec45793d67d
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_2.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_3.png b/_static/img/nvfuser_intro/nvfuser_tutorial_3.png
new file mode 100755
index 00000000000..be529d93259
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_3.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_4.png b/_static/img/nvfuser_intro/nvfuser_tutorial_4.png
new file mode 100755
index 00000000000..f2e7c3ff339
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_4.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_5.png b/_static/img/nvfuser_intro/nvfuser_tutorial_5.png
new file mode 100755
index 00000000000..efe43d73741
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_5.png differ
diff --git a/_static/img/nvfuser_intro/nvfuser_tutorial_6.png b/_static/img/nvfuser_intro/nvfuser_tutorial_6.png
new file mode 100755
index 00000000000..59a54a87f86
Binary files /dev/null and b/_static/img/nvfuser_intro/nvfuser_tutorial_6.png differ
diff --git a/_static/img/oneworker.png b/_static/img/oneworker.png
new file mode 100644
index 00000000000..255ec584834
Binary files /dev/null and b/_static/img/oneworker.png differ
diff --git a/_static/img/onnx/image_classifier_onnx_model_on_netron_web_ui.png b/_static/img/onnx/image_classifier_onnx_model_on_netron_web_ui.png
new file mode 100644
index 00000000000..6430e4943ff
Binary files /dev/null and b/_static/img/onnx/image_classifier_onnx_model_on_netron_web_ui.png differ
diff --git a/_static/img/onnx/netron_web_ui.png b/_static/img/onnx/netron_web_ui.png
new file mode 100755
index 00000000000..f88936eb824
Binary files /dev/null and b/_static/img/onnx/netron_web_ui.png differ
diff --git a/_static/img/optim_step_in_bwd/snapshot.jpg b/_static/img/optim_step_in_bwd/snapshot.jpg
new file mode 100644
index 00000000000..50be55e7b9a
Binary files /dev/null and b/_static/img/optim_step_in_bwd/snapshot.jpg differ
diff --git a/_static/img/optim_step_in_bwd/snapshot_opt_in_bwd.jpg b/_static/img/optim_step_in_bwd/snapshot_opt_in_bwd.jpg
new file mode 100644
index 00000000000..65d53d21c38
Binary files /dev/null and b/_static/img/optim_step_in_bwd/snapshot_opt_in_bwd.jpg differ
diff --git a/_static/img/pendulum.gif b/_static/img/pendulum.gif
new file mode 100644
index 00000000000..a7adf181fc8
Binary files /dev/null and b/_static/img/pendulum.gif differ
diff --git a/_static/img/per_channel_quant.png b/_static/img/per_channel_quant.png
new file mode 100644
index 00000000000..e28810aca3e
Binary files /dev/null and b/_static/img/per_channel_quant.png differ
diff --git a/_static/img/per_tensor_quant.png b/_static/img/per_tensor_quant.png
new file mode 100644
index 00000000000..183bab6fa3b
Binary files /dev/null and b/_static/img/per_tensor_quant.png differ
diff --git a/_static/img/perf_viz.png b/_static/img/perf_viz.png
new file mode 100644
index 00000000000..85608557bcb
Binary files /dev/null and b/_static/img/perf_viz.png differ
diff --git a/_static/img/pinmem/pinmem.png b/_static/img/pinmem/pinmem.png
new file mode 100644
index 00000000000..9d84e9d229d
Binary files /dev/null and b/_static/img/pinmem/pinmem.png differ
diff --git a/_static/img/pinmem/trace_streamed0_pinned0.png b/_static/img/pinmem/trace_streamed0_pinned0.png
new file mode 100644
index 00000000000..dedac997b0b
Binary files /dev/null and b/_static/img/pinmem/trace_streamed0_pinned0.png differ
diff --git a/_static/img/pinmem/trace_streamed0_pinned1.png b/_static/img/pinmem/trace_streamed0_pinned1.png
new file mode 100644
index 00000000000..2d5ff462e1a
Binary files /dev/null and b/_static/img/pinmem/trace_streamed0_pinned1.png differ
diff --git a/_static/img/pinmem/trace_streamed1_pinned0.png b/_static/img/pinmem/trace_streamed1_pinned0.png
new file mode 100644
index 00000000000..130182a1978
Binary files /dev/null and b/_static/img/pinmem/trace_streamed1_pinned0.png differ
diff --git a/_static/img/pinmem/trace_streamed1_pinned1.png b/_static/img/pinmem/trace_streamed1_pinned1.png
new file mode 100644
index 00000000000..c596fcdb691
Binary files /dev/null and b/_static/img/pinmem/trace_streamed1_pinned1.png differ
diff --git a/_static/img/profiler_callstack.png b/_static/img/profiler_callstack.png
new file mode 100644
index 00000000000..835673ba63c
Binary files /dev/null and b/_static/img/profiler_callstack.png differ
diff --git a/_static/img/profiler_distributed_view.png b/_static/img/profiler_distributed_view.png
new file mode 100644
index 00000000000..2b0d5565131
Binary files /dev/null and b/_static/img/profiler_distributed_view.png differ
diff --git a/_static/img/profiler_kernel_view.png b/_static/img/profiler_kernel_view.png
new file mode 100644
index 00000000000..cfe01b83a0d
Binary files /dev/null and b/_static/img/profiler_kernel_view.png differ
diff --git a/_static/img/profiler_memory_curve_selecting.png b/_static/img/profiler_memory_curve_selecting.png
new file mode 100644
index 00000000000..b5dc0c10e9b
Binary files /dev/null and b/_static/img/profiler_memory_curve_selecting.png differ
diff --git a/_static/img/profiler_memory_curve_single.png b/_static/img/profiler_memory_curve_single.png
new file mode 100644
index 00000000000..c12d480ac40
Binary files /dev/null and b/_static/img/profiler_memory_curve_single.png differ
diff --git a/_static/img/profiler_memory_view.png b/_static/img/profiler_memory_view.png
new file mode 100644
index 00000000000..4839505ab8c
Binary files /dev/null and b/_static/img/profiler_memory_view.png differ
diff --git a/_static/img/profiler_operator_view.png b/_static/img/profiler_operator_view.png
new file mode 100644
index 00000000000..e3e60b03025
Binary files /dev/null and b/_static/img/profiler_operator_view.png differ
diff --git a/_static/img/profiler_overview1.png b/_static/img/profiler_overview1.png
new file mode 100644
index 00000000000..01eef8fda68
Binary files /dev/null and b/_static/img/profiler_overview1.png differ
diff --git a/_static/img/profiler_overview2.png b/_static/img/profiler_overview2.png
new file mode 100644
index 00000000000..cc7826b352a
Binary files /dev/null and b/_static/img/profiler_overview2.png differ
diff --git a/_static/img/profiler_rocm_chrome_trace_view.png b/_static/img/profiler_rocm_chrome_trace_view.png
new file mode 100644
index 00000000000..cff7ba98c8a
Binary files /dev/null and b/_static/img/profiler_rocm_chrome_trace_view.png differ
diff --git a/_static/img/profiler_rocm_tensorboard_operartor_view.png b/_static/img/profiler_rocm_tensorboard_operartor_view.png
new file mode 100644
index 00000000000..27effb91e7c
Binary files /dev/null and b/_static/img/profiler_rocm_tensorboard_operartor_view.png differ
diff --git a/_static/img/profiler_trace_view1.png b/_static/img/profiler_trace_view1.png
new file mode 100644
index 00000000000..215fe03e724
Binary files /dev/null and b/_static/img/profiler_trace_view1.png differ
diff --git a/_static/img/profiler_trace_view2.png b/_static/img/profiler_trace_view2.png
new file mode 100644
index 00000000000..790ef5d58ea
Binary files /dev/null and b/_static/img/profiler_trace_view2.png differ
diff --git a/_static/img/profiler_trace_view_fwd_bwd.png b/_static/img/profiler_trace_view_fwd_bwd.png
new file mode 100644
index 00000000000..c773b829e5d
Binary files /dev/null and b/_static/img/profiler_trace_view_fwd_bwd.png differ
diff --git a/_static/img/profiler_views_list.png b/_static/img/profiler_views_list.png
new file mode 100644
index 00000000000..040f392e366
Binary files /dev/null and b/_static/img/profiler_views_list.png differ
diff --git a/_static/img/profiler_vscode.png b/_static/img/profiler_vscode.png
new file mode 100644
index 00000000000..afb99f06937
Binary files /dev/null and b/_static/img/profiler_vscode.png differ
diff --git a/_static/img/pruning.png b/_static/img/pruning.png
new file mode 100644
index 00000000000..7359f11e9a6
Binary files /dev/null and b/_static/img/pruning.png differ
diff --git a/_static/img/pruning_flow.jpg b/_static/img/pruning_flow.jpg
new file mode 100644
index 00000000000..bd57158b302
Binary files /dev/null and b/_static/img/pruning_flow.jpg differ
diff --git a/_static/img/python_extension_autoload_impl.png b/_static/img/python_extension_autoload_impl.png
new file mode 100644
index 00000000000..64e18fc7b4b
Binary files /dev/null and b/_static/img/python_extension_autoload_impl.png differ
diff --git a/_static/img/qat.png b/_static/img/qat.png
new file mode 100644
index 00000000000..e8ca311745c
Binary files /dev/null and b/_static/img/qat.png differ
diff --git a/_static/img/quant_embeddings.png b/_static/img/quant_embeddings.png
new file mode 100644
index 00000000000..035561465a2
Binary files /dev/null and b/_static/img/quant_embeddings.png differ
diff --git a/_static/img/ray-tune.png b/_static/img/ray-tune.png
new file mode 100644
index 00000000000..febd6de282e
Binary files /dev/null and b/_static/img/ray-tune.png differ
diff --git a/_static/img/reinforcement_learning_diagram.drawio b/_static/img/reinforcement_learning_diagram.drawio
new file mode 100644
index 00000000000..2ff4e6f0270
--- /dev/null
+++ b/_static/img/reinforcement_learning_diagram.drawio
@@ -0,0 +1 @@
+5Vpbc+MmFP41nmkfmpGEpMiPjTftzrTZZtbbbbYvHSxhiRQJFeHb/vqChG4gx95ElqfTeCaGwwEO37lwDskMLNL9zwzmyQONEJk5VrSfgXczx7GtuSW+JOVQUXzXrggxw5FiaglL/BXVMxV1gyNU9Bg5pYTjvE8MaZahkPdokDG667OtKenvmsMYGYRlCIlJ/QNHPKmogWe19PcIx0m9s22pkRTWzIpQJDCiuw4J3M/AglHKq1a6XyAiwatxqeb9dGS0EYyhjJ8z4flQfCient3PwfKXVfTn40P6/vMPapUtJBt14I8oJ/AgaA8opeygZOeHGhBGN1mE5JrWDNztEszRMoehHN0JExC0hKdE9GzRXGNCFpRQVs4Fa09+BD1mMMJC7s7YqvyIsYIz+jfqjPjljxhRsiLG0f4oCHYDrbBJRFPE5SEsNcGt1aPMMVDdXatbu1ZY0tGrr2hQmVPcrNwiLhoK9G9QgGMo4Lec41T6gWN9535v4C/WFLaPTmM/AlrebR8t2z0TruBScAEDLgaziKaCtoI8TAy4wg3bltYqIUFZ9KOMCaIbElgUOOxD1rftai0UGQFCg0/sRzcsRKfdjEMWI37KGkx1dOD2BtCuaQwRyPG2L+6QCtQOjxSLgxz1DRdoWqyOqWZ1I42+kNdfCOjmUOFgLCR0U0aemi2XDMVxgQ3ztK0X5fJtjR/0+EWjkqA1z0YHr7dYz7DYR0pwKM/5AfFRw2sEUbAOh8PrLYI+sgbDaxig1foy4dWxrh1fAzOACodeqi5lPKExzSC5b6laGGh5fqU0V8g/I84PKluBG06HQu8okcN/W+Q4OyS8CWTfsPFPlbz/Cxu/eg5hm0nEmPcg2mP+JNs3nup96Yy823c7h/HvTu8/cXfqV9H8lXen5xxJuUa+O91A2yd4+e709LvTP8Hvvsh/mbvWNtPDUR0hE+I9NVNF50vrFrLb+kLZq52hcaCO+9hTuY9zpvt413Qf3Vqc29e6z1xbyJkm9TSKb51fS4mdKVJP2zXc4fc8grwsLb3rlpaODog3cItaU96ijUdNmirWscHuXq03jjdVeLDnZ8aHI+qcJsGspewFdp8Iee8ivJU7Ehxn5YD/z0a+qN0RtOZtT7Ri9Q1Tac3ZqsjLvvWJQZzhLBbNUmtqXSFnuXQzb5zd7Bvxa5FQWkgvbB4vJDxCgXVlCEOOaXZhURwpylJQiRQFZdsL7wfkfh9RSFkkGql6XrQ2KiRddG9X7t2+rF10L6/ElUpu5VZ/ZWUt1D/piuk76/K8pWyq5S+lHiVi23oGaA9E7PlAxG4Yxw/ZZr4X1q5Vu9AE6V8wP5UAyt4jYlgcG7HrlUhVGL1WkgeO5EDf/r5oDdcuo9dIeqUPXk7ygK/xn3iPNACxJkgKHTMpHNVJBmod6+Z2snzmqmWMrlCgVx/nWjjQLc+7jIUDvYw5ZeFA43emsFjzCf0iYd2ava6q7z2LTVbX18XdyaDvX9UjNIMBevl2tkdo71VATyrG8ghd4LcV6qLb/oW/Ym//TwLc/ws=
\ No newline at end of file
diff --git a/_static/img/reinforcement_learning_diagram.jpg b/_static/img/reinforcement_learning_diagram.jpg
index bdcbc322502..7e04efc2534 100644
Binary files a/_static/img/reinforcement_learning_diagram.jpg and b/_static/img/reinforcement_learning_diagram.jpg differ
diff --git a/_static/img/replaybuffer_traj.png b/_static/img/replaybuffer_traj.png
new file mode 100644
index 00000000000..64773ee8f78
Binary files /dev/null and b/_static/img/replaybuffer_traj.png differ
diff --git a/_static/img/rollout_recurrent.png b/_static/img/rollout_recurrent.png
new file mode 100644
index 00000000000..2ce24d40d23
Binary files /dev/null and b/_static/img/rollout_recurrent.png differ
diff --git a/_static/img/rpc-images/batch.png b/_static/img/rpc-images/batch.png
new file mode 100644
index 00000000000..cde410d1bd1
Binary files /dev/null and b/_static/img/rpc-images/batch.png differ
diff --git a/_static/img/rpc_trace_img.png b/_static/img/rpc_trace_img.png
new file mode 100644
index 00000000000..4faaf97ad47
Binary files /dev/null and b/_static/img/rpc_trace_img.png differ
diff --git a/_static/img/sample_file.jpeg b/_static/img/sample_file.jpeg
new file mode 100644
index 00000000000..a7b314bd969
Binary files /dev/null and b/_static/img/sample_file.jpeg differ
diff --git a/_static/img/seq-seq-images/attention-decoder-network.png b/_static/img/seq-seq-images/attention-decoder-network.png
index 243f87c6e97..d31d42a5af1 100755
Binary files a/_static/img/seq-seq-images/attention-decoder-network.png and b/_static/img/seq-seq-images/attention-decoder-network.png differ
diff --git a/_static/img/steam-train-whistle-daniel_simon-converted-from-mp3.wav b/_static/img/steam-train-whistle-daniel_simon-converted-from-mp3.wav
new file mode 100644
index 00000000000..3f899c9dadd
Binary files /dev/null and b/_static/img/steam-train-whistle-daniel_simon-converted-from-mp3.wav differ
diff --git a/_static/img/tensorboard_figure.png b/_static/img/tensorboard_figure.png
new file mode 100644
index 00000000000..e4dd38e98da
Binary files /dev/null and b/_static/img/tensorboard_figure.png differ
diff --git a/_static/img/tensorboard_first_view.png b/_static/img/tensorboard_first_view.png
new file mode 100644
index 00000000000..702c8158a82
Binary files /dev/null and b/_static/img/tensorboard_first_view.png differ
diff --git a/_static/img/tensorboard_images.png b/_static/img/tensorboard_images.png
new file mode 100644
index 00000000000..79b7e15a7af
Binary files /dev/null and b/_static/img/tensorboard_images.png differ
diff --git a/_static/img/tensorboard_model_viz.png b/_static/img/tensorboard_model_viz.png
new file mode 100644
index 00000000000..2cd22344f15
Binary files /dev/null and b/_static/img/tensorboard_model_viz.png differ
diff --git a/_static/img/tensorboard_pr_curves.png b/_static/img/tensorboard_pr_curves.png
new file mode 100644
index 00000000000..0360187eae6
Binary files /dev/null and b/_static/img/tensorboard_pr_curves.png differ
diff --git a/_static/img/tensorboard_projector.png b/_static/img/tensorboard_projector.png
new file mode 100644
index 00000000000..f709efc32d0
Binary files /dev/null and b/_static/img/tensorboard_projector.png differ
diff --git a/_static/img/tensorboard_scalar_runs.png b/_static/img/tensorboard_scalar_runs.png
new file mode 100644
index 00000000000..f89ace713aa
Binary files /dev/null and b/_static/img/tensorboard_scalar_runs.png differ
diff --git a/_static/img/text_sentiment_ngrams_model.png b/_static/img/text_sentiment_ngrams_model.png
new file mode 100644
index 00000000000..94fdf554047
Binary files /dev/null and b/_static/img/text_sentiment_ngrams_model.png differ
diff --git a/_static/img/thumbnails/captum_teaser.png b/_static/img/thumbnails/captum_teaser.png
new file mode 100644
index 00000000000..c7fcb2c093a
Binary files /dev/null and b/_static/img/thumbnails/captum_teaser.png differ
diff --git a/_static/img/thumbnails/cropped/60-min-blitz.png b/_static/img/thumbnails/cropped/60-min-blitz.png
new file mode 100644
index 00000000000..681a16d9935
Binary files /dev/null and b/_static/img/thumbnails/cropped/60-min-blitz.png differ
diff --git a/_static/img/thumbnails/cropped/Adversarial-Example-Generation.png b/_static/img/thumbnails/cropped/Adversarial-Example-Generation.png
new file mode 100644
index 00000000000..ad5014e805c
Binary files /dev/null and b/_static/img/thumbnails/cropped/Adversarial-Example-Generation.png differ
diff --git a/_static/img/thumbnails/cropped/Autograd-in-Cpp-Frontend.png b/_static/img/thumbnails/cropped/Autograd-in-Cpp-Frontend.png
new file mode 100644
index 00000000000..3aec75031ae
Binary files /dev/null and b/_static/img/thumbnails/cropped/Autograd-in-Cpp-Frontend.png differ
diff --git a/_static/img/thumbnails/cropped/Combining-Distributed-DataParallel-with-Distributed-RPC-Framework.png b/_static/img/thumbnails/cropped/Combining-Distributed-DataParallel-with-Distributed-RPC-Framework.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Combining-Distributed-DataParallel-with-Distributed-RPC-Framework.png differ
diff --git a/_static/img/thumbnails/cropped/Custom-Cpp-and-CUDA-Extensions.png b/_static/img/thumbnails/cropped/Custom-Cpp-and-CUDA-Extensions.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Custom-Cpp-and-CUDA-Extensions.png differ
diff --git a/_static/img/thumbnails/cropped/Customize-Process-Group-Backends-Using-Cpp-Extensions.png b/_static/img/thumbnails/cropped/Customize-Process-Group-Backends-Using-Cpp-Extensions.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Customize-Process-Group-Backends-Using-Cpp-Extensions.png differ
diff --git a/_static/img/thumbnails/cropped/DCGAN-Tutorial.png b/_static/img/thumbnails/cropped/DCGAN-Tutorial.png
new file mode 100644
index 00000000000..a0c89096e9a
Binary files /dev/null and b/_static/img/thumbnails/cropped/DCGAN-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/Deploying-PyTorch-in-Python-via-a-REST-API-with-Flask.png b/_static/img/thumbnails/cropped/Deploying-PyTorch-in-Python-via-a-REST-API-with-Flask.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Deploying-PyTorch-in-Python-via-a-REST-API-with-Flask.png differ
diff --git a/_static/img/thumbnails/cropped/Distributed-Pipeline-Parallelism-Using-RPC.png b/_static/img/thumbnails/cropped/Distributed-Pipeline-Parallelism-Using-RPC.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Distributed-Pipeline-Parallelism-Using-RPC.png differ
diff --git a/_static/img/thumbnails/cropped/Exporting-PyTorch-Models-to-ONNX-Graphs.png b/_static/img/thumbnails/cropped/Exporting-PyTorch-Models-to-ONNX-Graphs.png
new file mode 100755
index 00000000000..00156df042e
Binary files /dev/null and b/_static/img/thumbnails/cropped/Exporting-PyTorch-Models-to-ONNX-Graphs.png differ
diff --git a/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Classes.png b/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Classes.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Classes.png differ
diff --git a/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Operators.png b/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Operators.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Operators.png differ
diff --git a/_static/img/thumbnails/cropped/Getting Started with Distributed-RPC-Framework.png b/_static/img/thumbnails/cropped/Getting Started with Distributed-RPC-Framework.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Getting Started with Distributed-RPC-Framework.png differ
diff --git a/_static/img/thumbnails/cropped/Getting-Started-with Distributed RPC Framework.png b/_static/img/thumbnails/cropped/Getting-Started-with Distributed RPC Framework.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Getting-Started-with Distributed RPC Framework.png differ
diff --git a/_static/img/thumbnails/cropped/Getting-Started-with-DCP.png b/_static/img/thumbnails/cropped/Getting-Started-with-DCP.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Getting-Started-with-DCP.png differ
diff --git a/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-Data-Parallel.png b/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-Data-Parallel.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-Data-Parallel.png differ
diff --git a/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-RPC-Framework.png b/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-RPC-Framework.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-RPC-Framework.png differ
diff --git a/_static/img/thumbnails/cropped/Getting-Started-with-FSDP.png b/_static/img/thumbnails/cropped/Getting-Started-with-FSDP.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Getting-Started-with-FSDP.png differ
diff --git a/_static/img/thumbnails/cropped/Implementing-Batch-RPC-Processing-Using-Asynchronous-Executions.png b/_static/img/thumbnails/cropped/Implementing-Batch-RPC-Processing-Using-Asynchronous-Executions.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Implementing-Batch-RPC-Processing-Using-Asynchronous-Executions.png differ
diff --git a/_static/img/thumbnails/cropped/Implementing-a-Parameter-Server-Using-Distributed-RPC-Framework.png b/_static/img/thumbnails/cropped/Implementing-a-Parameter-Server-Using-Distributed-RPC-Framework.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Implementing-a-Parameter-Server-Using-Distributed-RPC-Framework.png differ
diff --git a/_static/img/thumbnails/cropped/Introduction-to-TorchScript.png b/_static/img/thumbnails/cropped/Introduction-to-TorchScript.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Introduction-to-TorchScript.png differ
diff --git a/_static/img/thumbnails/cropped/Language-Translation-with-TorchText.png b/_static/img/thumbnails/cropped/Language-Translation-with-TorchText.png
new file mode 100644
index 00000000000..9330c6cbdb5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Language-Translation-with-TorchText.png differ
diff --git a/_static/img/thumbnails/cropped/Large-Scale-Transformer-model-training-with-Tensor-Parallel.png b/_static/img/thumbnails/cropped/Large-Scale-Transformer-model-training-with-Tensor-Parallel.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Large-Scale-Transformer-model-training-with-Tensor-Parallel.png differ
diff --git a/_static/img/thumbnails/cropped/Loading-a-TorchScript-Model-in-Cpp.png b/_static/img/thumbnails/cropped/Loading-a-TorchScript-Model-in-Cpp.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Loading-a-TorchScript-Model-in-Cpp.png differ
diff --git a/_static/img/thumbnails/cropped/Model-Parallel-Best-Practices.png b/_static/img/thumbnails/cropped/Model-Parallel-Best-Practices.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Model-Parallel-Best-Practices.png differ
diff --git a/_static/img/thumbnails/cropped/NLP-From-Scratch-Classifying-Names-with-a-Character-Level-RNN.png b/_static/img/thumbnails/cropped/NLP-From-Scratch-Classifying-Names-with-a-Character-Level-RNN.png
new file mode 100644
index 00000000000..0aa02de9a53
Binary files /dev/null and b/_static/img/thumbnails/cropped/NLP-From-Scratch-Classifying-Names-with-a-Character-Level-RNN.png differ
diff --git a/_static/img/thumbnails/cropped/NLP-From-Scratch-Generating-Names-with-a-Character-Level-RNN.png b/_static/img/thumbnails/cropped/NLP-From-Scratch-Generating-Names-with-a-Character-Level-RNN.png
new file mode 100644
index 00000000000..a63d82ba4b4
Binary files /dev/null and b/_static/img/thumbnails/cropped/NLP-From-Scratch-Generating-Names-with-a-Character-Level-RNN.png differ
diff --git a/_static/img/thumbnails/cropped/NLP-From-Scratch-Translation-with-a-Sequence-to-Sequence-Network-and-Attention.png b/_static/img/thumbnails/cropped/NLP-From-Scratch-Translation-with-a-Sequence-to-Sequence-Network-and-Attention.png
new file mode 100644
index 00000000000..11d4f07c3bf
Binary files /dev/null and b/_static/img/thumbnails/cropped/NLP-From-Scratch-Translation-with-a-Sequence-to-Sequence-Network-and-Attention.png differ
diff --git a/_static/img/thumbnails/cropped/Pruning-Tutorial.png b/_static/img/thumbnails/cropped/Pruning-Tutorial.png
new file mode 100644
index 00000000000..32953c7ab19
Binary files /dev/null and b/_static/img/thumbnails/cropped/Pruning-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/PyTorch-Distributed-Overview.png b/_static/img/thumbnails/cropped/PyTorch-Distributed-Overview.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/PyTorch-Distributed-Overview.png differ
diff --git a/_static/img/thumbnails/cropped/Sequence-to-Sequence-Modeling-with-nnTransformer-andTorchText.png b/_static/img/thumbnails/cropped/Sequence-to-Sequence-Modeling-with-nnTransformer-andTorchText.png
new file mode 100644
index 00000000000..00c4a236f24
Binary files /dev/null and b/_static/img/thumbnails/cropped/Sequence-to-Sequence-Modeling-with-nnTransformer-andTorchText.png differ
diff --git a/_static/img/thumbnails/cropped/TIAToolbox-Tutorial.png b/_static/img/thumbnails/cropped/TIAToolbox-Tutorial.png
new file mode 100644
index 00000000000..76f2bcaf4de
Binary files /dev/null and b/_static/img/thumbnails/cropped/TIAToolbox-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/Text-Classification-with-TorchText.png b/_static/img/thumbnails/cropped/Text-Classification-with-TorchText.png
new file mode 100644
index 00000000000..e46aa333390
Binary files /dev/null and b/_static/img/thumbnails/cropped/Text-Classification-with-TorchText.png differ
diff --git a/_static/img/thumbnails/cropped/TorchScript-Parallelism.jpg b/_static/img/thumbnails/cropped/TorchScript-Parallelism.jpg
new file mode 100644
index 00000000000..237990a0460
Binary files /dev/null and b/_static/img/thumbnails/cropped/TorchScript-Parallelism.jpg differ
diff --git a/_static/img/thumbnails/cropped/TorchVision-Object-Detection-Finetuning-Tutorial.png b/_static/img/thumbnails/cropped/TorchVision-Object-Detection-Finetuning-Tutorial.png
new file mode 100644
index 00000000000..e79ff0d395e
Binary files /dev/null and b/_static/img/thumbnails/cropped/TorchVision-Object-Detection-Finetuning-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/Training-Transformer-Models-using-Distributed-Data-Parallel-and-Pipeline-Parallelism.png b/_static/img/thumbnails/cropped/Training-Transformer-Models-using-Distributed-Data-Parallel-and-Pipeline-Parallelism.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Training-Transformer-Models-using-Distributed-Data-Parallel-and-Pipeline-Parallelism.png differ
diff --git a/_static/img/thumbnails/cropped/Training-Transformer-models-using-Pipeline-Parallelism.png b/_static/img/thumbnails/cropped/Training-Transformer-models-using-Pipeline-Parallelism.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Training-Transformer-models-using-Pipeline-Parallelism.png differ
diff --git a/_static/img/thumbnails/cropped/Transfer-Learning-for-Computer-Vision-Tutorial.png b/_static/img/thumbnails/cropped/Transfer-Learning-for-Computer-Vision-Tutorial.png
new file mode 100644
index 00000000000..029f0ff1bea
Binary files /dev/null and b/_static/img/thumbnails/cropped/Transfer-Learning-for-Computer-Vision-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/Tutorials_Card_Template.psd b/_static/img/thumbnails/cropped/Tutorials_Card_Template.psd
new file mode 100644
index 00000000000..6caf48a5951
Binary files /dev/null and b/_static/img/thumbnails/cropped/Tutorials_Card_Template.psd differ
diff --git a/_static/img/thumbnails/cropped/Using-the-PyTorch-Cpp-Frontend.png b/_static/img/thumbnails/cropped/Using-the-PyTorch-Cpp-Frontend.png
new file mode 100644
index 00000000000..3aec75031ae
Binary files /dev/null and b/_static/img/thumbnails/cropped/Using-the-PyTorch-Cpp-Frontend.png differ
diff --git a/_static/img/thumbnails/cropped/Writing-Distributed-Applications-with-PyTorch.png b/_static/img/thumbnails/cropped/Writing-Distributed-Applications-with-PyTorch.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/Writing-Distributed-Applications-with-PyTorch.png differ
diff --git a/_static/img/thumbnails/cropped/advanced-PyTorch-1point0-Distributed-Trainer-with-Amazon-AWS.png b/_static/img/thumbnails/cropped/advanced-PyTorch-1point0-Distributed-Trainer-with-Amazon-AWS.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/advanced-PyTorch-1point0-Distributed-Trainer-with-Amazon-AWS.png differ
diff --git a/_static/img/thumbnails/cropped/amp.png b/_static/img/thumbnails/cropped/amp.png
new file mode 100644
index 00000000000..a6916ce5605
Binary files /dev/null and b/_static/img/thumbnails/cropped/amp.png differ
diff --git a/_static/img/thumbnails/cropped/android.png b/_static/img/thumbnails/cropped/android.png
new file mode 100644
index 00000000000..5c6079d9090
Binary files /dev/null and b/_static/img/thumbnails/cropped/android.png differ
diff --git a/_static/img/thumbnails/cropped/custom-datasets-transforms-and-dataloaders.png b/_static/img/thumbnails/cropped/custom-datasets-transforms-and-dataloaders.png
new file mode 100644
index 00000000000..5f73aa5663c
Binary files /dev/null and b/_static/img/thumbnails/cropped/custom-datasets-transforms-and-dataloaders.png differ
diff --git a/_static/img/thumbnails/cropped/defining-a-network.PNG b/_static/img/thumbnails/cropped/defining-a-network.PNG
new file mode 100644
index 00000000000..ded6a9ed583
Binary files /dev/null and b/_static/img/thumbnails/cropped/defining-a-network.PNG differ
diff --git a/_static/img/thumbnails/cropped/experimental-Channels-Last-Memory-Format-in-PyTorch.png b/_static/img/thumbnails/cropped/experimental-Channels-Last-Memory-Format-in-PyTorch.png
new file mode 100644
index 00000000000..18cbc1d0bc2
Binary files /dev/null and b/_static/img/thumbnails/cropped/experimental-Channels-Last-Memory-Format-in-PyTorch.png differ
diff --git a/_static/img/thumbnails/cropped/experimental-Introduction-to-Named-Tensors-in-PyTorch.png b/_static/img/thumbnails/cropped/experimental-Introduction-to-Named-Tensors-in-PyTorch.png
new file mode 100644
index 00000000000..d52414ec275
Binary files /dev/null and b/_static/img/thumbnails/cropped/experimental-Introduction-to-Named-Tensors-in-PyTorch.png differ
diff --git a/_static/img/thumbnails/cropped/experimental-Quantized-Transfer-Learning-for-Computer-Vision-Tutorial.png b/_static/img/thumbnails/cropped/experimental-Quantized-Transfer-Learning-for-Computer-Vision-Tutorial.png
new file mode 100644
index 00000000000..d826d8170c1
Binary files /dev/null and b/_static/img/thumbnails/cropped/experimental-Quantized-Transfer-Learning-for-Computer-Vision-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/experimental-Static-Quantization-with-Eager-Mode-in-PyTorch.png b/_static/img/thumbnails/cropped/experimental-Static-Quantization-with-Eager-Mode-in-PyTorch.png
new file mode 100644
index 00000000000..d826d8170c1
Binary files /dev/null and b/_static/img/thumbnails/cropped/experimental-Static-Quantization-with-Eager-Mode-in-PyTorch.png differ
diff --git a/_static/img/thumbnails/cropped/generic-pytorch-logo.png b/_static/img/thumbnails/cropped/generic-pytorch-logo.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/generic-pytorch-logo.png differ
diff --git a/_static/img/thumbnails/cropped/ios.png b/_static/img/thumbnails/cropped/ios.png
new file mode 100644
index 00000000000..8c1d4a2b04d
Binary files /dev/null and b/_static/img/thumbnails/cropped/ios.png differ
diff --git a/_static/img/thumbnails/cropped/knowledge_distillation_pytorch_logo.png b/_static/img/thumbnails/cropped/knowledge_distillation_pytorch_logo.png
new file mode 100644
index 00000000000..3ce40781542
Binary files /dev/null and b/_static/img/thumbnails/cropped/knowledge_distillation_pytorch_logo.png differ
diff --git a/_static/img/thumbnails/cropped/learning-pytorch-with-examples.png b/_static/img/thumbnails/cropped/learning-pytorch-with-examples.png
new file mode 100644
index 00000000000..b292603835b
Binary files /dev/null and b/_static/img/thumbnails/cropped/learning-pytorch-with-examples.png differ
diff --git a/_static/img/thumbnails/cropped/loading-data-in-pytorch.png b/_static/img/thumbnails/cropped/loading-data-in-pytorch.png
new file mode 100644
index 00000000000..20309e32cf5
Binary files /dev/null and b/_static/img/thumbnails/cropped/loading-data-in-pytorch.png differ
diff --git a/_static/img/thumbnails/cropped/loading-data.PNG b/_static/img/thumbnails/cropped/loading-data.PNG
new file mode 100644
index 00000000000..0cb07e34e5e
Binary files /dev/null and b/_static/img/thumbnails/cropped/loading-data.PNG differ
diff --git a/_static/img/thumbnails/cropped/model-interpretability-using-captum.png b/_static/img/thumbnails/cropped/model-interpretability-using-captum.png
new file mode 100644
index 00000000000..2e531ae27c9
Binary files /dev/null and b/_static/img/thumbnails/cropped/model-interpretability-using-captum.png differ
diff --git a/_static/img/thumbnails/cropped/parametrizations.png b/_static/img/thumbnails/cropped/parametrizations.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/parametrizations.png differ
diff --git a/_static/img/thumbnails/cropped/profile.png b/_static/img/thumbnails/cropped/profile.png
new file mode 100644
index 00000000000..372db8bbe87
Binary files /dev/null and b/_static/img/thumbnails/cropped/profile.png differ
diff --git a/_static/img/thumbnails/cropped/profiler.png b/_static/img/thumbnails/cropped/profiler.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/profiler.png differ
diff --git a/_static/img/thumbnails/cropped/pytorch-logo.png b/_static/img/thumbnails/cropped/pytorch-logo.png
new file mode 100644
index 00000000000..426a14d98f5
Binary files /dev/null and b/_static/img/thumbnails/cropped/pytorch-logo.png differ
diff --git a/_static/img/thumbnails/cropped/realtime_rpi.png b/_static/img/thumbnails/cropped/realtime_rpi.png
new file mode 100644
index 00000000000..b233f3df3a1
Binary files /dev/null and b/_static/img/thumbnails/cropped/realtime_rpi.png differ
diff --git a/_static/img/thumbnails/cropped/saving-and-loading-general-checkpoint.PNG b/_static/img/thumbnails/cropped/saving-and-loading-general-checkpoint.PNG
new file mode 100644
index 00000000000..ba351430712
Binary files /dev/null and b/_static/img/thumbnails/cropped/saving-and-loading-general-checkpoint.PNG differ
diff --git a/_static/img/thumbnails/cropped/saving-and-loading-models-across-devices.PNG b/_static/img/thumbnails/cropped/saving-and-loading-models-across-devices.PNG
new file mode 100644
index 00000000000..a1c337928a1
Binary files /dev/null and b/_static/img/thumbnails/cropped/saving-and-loading-models-across-devices.PNG differ
diff --git a/_static/img/thumbnails/cropped/saving-and-loading-models-for-inference.PNG b/_static/img/thumbnails/cropped/saving-and-loading-models-for-inference.PNG
new file mode 100644
index 00000000000..b8075559c1d
Binary files /dev/null and b/_static/img/thumbnails/cropped/saving-and-loading-models-for-inference.PNG differ
diff --git a/_static/img/thumbnails/cropped/saving-multiple-models.PNG b/_static/img/thumbnails/cropped/saving-multiple-models.PNG
new file mode 100644
index 00000000000..2917cac557a
Binary files /dev/null and b/_static/img/thumbnails/cropped/saving-multiple-models.PNG differ
diff --git a/_static/img/thumbnails/cropped/torch-nn.png b/_static/img/thumbnails/cropped/torch-nn.png
new file mode 100644
index 00000000000..44a3e8dca1d
Binary files /dev/null and b/_static/img/thumbnails/cropped/torch-nn.png differ
diff --git a/_static/img/thumbnails/cropped/torch_text_logo.png b/_static/img/thumbnails/cropped/torch_text_logo.png
new file mode 100644
index 00000000000..3fe736d60e2
Binary files /dev/null and b/_static/img/thumbnails/cropped/torch_text_logo.png differ
diff --git a/_static/img/thumbnails/cropped/torchaudio-Tutorial.png b/_static/img/thumbnails/cropped/torchaudio-Tutorial.png
new file mode 100644
index 00000000000..c49aa17c02f
Binary files /dev/null and b/_static/img/thumbnails/cropped/torchaudio-Tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/torchaudio-alignment.png b/_static/img/thumbnails/cropped/torchaudio-alignment.png
new file mode 100644
index 00000000000..d5a25f35219
Binary files /dev/null and b/_static/img/thumbnails/cropped/torchaudio-alignment.png differ
diff --git a/_static/img/thumbnails/cropped/torchaudio-asr.png b/_static/img/thumbnails/cropped/torchaudio-asr.png
new file mode 100644
index 00000000000..ff84f3ff3f1
Binary files /dev/null and b/_static/img/thumbnails/cropped/torchaudio-asr.png differ
diff --git a/_static/img/thumbnails/cropped/torchaudio-speech.png b/_static/img/thumbnails/cropped/torchaudio-speech.png
new file mode 100644
index 00000000000..c874a6bb482
Binary files /dev/null and b/_static/img/thumbnails/cropped/torchaudio-speech.png differ
diff --git a/_static/img/thumbnails/cropped/torchscript_overview.png b/_static/img/thumbnails/cropped/torchscript_overview.png
new file mode 100644
index 00000000000..63e599b1a80
Binary files /dev/null and b/_static/img/thumbnails/cropped/torchscript_overview.png differ
diff --git a/_static/img/thumbnails/cropped/understanding_leaf_vs_nonleaf.png b/_static/img/thumbnails/cropped/understanding_leaf_vs_nonleaf.png
new file mode 100644
index 00000000000..0590cf227d9
Binary files /dev/null and b/_static/img/thumbnails/cropped/understanding_leaf_vs_nonleaf.png differ
diff --git a/_static/img/thumbnails/cropped/using-flask-create-restful-api.png b/_static/img/thumbnails/cropped/using-flask-create-restful-api.png
new file mode 100644
index 00000000000..176c4de6d5b
Binary files /dev/null and b/_static/img/thumbnails/cropped/using-flask-create-restful-api.png differ
diff --git a/_static/img/thumbnails/cropped/visualizing-with-tensorboard.png b/_static/img/thumbnails/cropped/visualizing-with-tensorboard.png
new file mode 100644
index 00000000000..8fdecca65fe
Binary files /dev/null and b/_static/img/thumbnails/cropped/visualizing-with-tensorboard.png differ
diff --git a/_static/img/thumbnails/cropped/visualizing_gradients_tutorial.png b/_static/img/thumbnails/cropped/visualizing_gradients_tutorial.png
new file mode 100644
index 00000000000..6ff6d97f2e2
Binary files /dev/null and b/_static/img/thumbnails/cropped/visualizing_gradients_tutorial.png differ
diff --git a/_static/img/thumbnails/cropped/warmstarting-models.PNG b/_static/img/thumbnails/cropped/warmstarting-models.PNG
new file mode 100644
index 00000000000..385f2ab80c8
Binary files /dev/null and b/_static/img/thumbnails/cropped/warmstarting-models.PNG differ
diff --git a/_static/img/thumbnails/cropped/what-is-a-state-dict.PNG b/_static/img/thumbnails/cropped/what-is-a-state-dict.PNG
new file mode 100644
index 00000000000..b0eee89ad73
Binary files /dev/null and b/_static/img/thumbnails/cropped/what-is-a-state-dict.PNG differ
diff --git a/_static/img/thumbnails/cropped/zeroing-out-gradients.PNG b/_static/img/thumbnails/cropped/zeroing-out-gradients.PNG
new file mode 100644
index 00000000000..0f21b230abf
Binary files /dev/null and b/_static/img/thumbnails/cropped/zeroing-out-gradients.PNG differ
diff --git a/_static/img/thumbnails/custom_dataset.png b/_static/img/thumbnails/custom_dataset.png
new file mode 100644
index 00000000000..59a8993bc4a
Binary files /dev/null and b/_static/img/thumbnails/custom_dataset.png differ
diff --git a/_static/img/thumbnails/defining_a_network.png b/_static/img/thumbnails/defining_a_network.png
new file mode 100644
index 00000000000..f0c0a940713
Binary files /dev/null and b/_static/img/thumbnails/defining_a_network.png differ
diff --git a/_static/img/thumbnails/german_to_english_translation.png b/_static/img/thumbnails/german_to_english_translation.png
new file mode 100644
index 00000000000..a3560c574a6
Binary files /dev/null and b/_static/img/thumbnails/german_to_english_translation.png differ
diff --git a/_static/img/thumbnails/pixelated-cat.png b/_static/img/thumbnails/pixelated-cat.png
new file mode 100644
index 00000000000..c3d527f0691
Binary files /dev/null and b/_static/img/thumbnails/pixelated-cat.png differ
diff --git a/_static/img/thumbnails/pytorch_tensorboard.png b/_static/img/thumbnails/pytorch_tensorboard.png
new file mode 100644
index 00000000000..2dce6a6e268
Binary files /dev/null and b/_static/img/thumbnails/pytorch_tensorboard.png differ
diff --git a/_static/img/thumbnails/tensorboard_dev.png b/_static/img/thumbnails/tensorboard_dev.png
new file mode 100644
index 00000000000..056839a6359
Binary files /dev/null and b/_static/img/thumbnails/tensorboard_dev.png differ
diff --git a/_static/img/thumbnails/tensorboard_scalars.png b/_static/img/thumbnails/tensorboard_scalars.png
new file mode 100644
index 00000000000..ab6734ab3e9
Binary files /dev/null and b/_static/img/thumbnails/tensorboard_scalars.png differ
diff --git a/_static/img/thumbnails/torchrec.png b/_static/img/thumbnails/torchrec.png
new file mode 100644
index 00000000000..1304b56873e
Binary files /dev/null and b/_static/img/thumbnails/torchrec.png differ
diff --git a/_static/img/thumbnails/torchtext.png b/_static/img/thumbnails/torchtext.png
new file mode 100644
index 00000000000..ee4285aef34
Binary files /dev/null and b/_static/img/thumbnails/torchtext.png differ
diff --git a/_static/img/torchscript.png b/_static/img/torchscript.png
new file mode 100644
index 00000000000..b748d45d231
Binary files /dev/null and b/_static/img/torchscript.png differ
diff --git a/_static/img/torchscript_to_cpp.png b/_static/img/torchscript_to_cpp.png
new file mode 100644
index 00000000000..579d65b00d4
Binary files /dev/null and b/_static/img/torchscript_to_cpp.png differ
diff --git a/_static/img/torchserve-ipex-images-2/1.png b/_static/img/torchserve-ipex-images-2/1.png
new file mode 100644
index 00000000000..4a9f488236b
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/1.png differ
diff --git a/_static/img/torchserve-ipex-images-2/10.png b/_static/img/torchserve-ipex-images-2/10.png
new file mode 100644
index 00000000000..d56f34600d8
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/10.png differ
diff --git a/_static/img/torchserve-ipex-images-2/11.png b/_static/img/torchserve-ipex-images-2/11.png
new file mode 100644
index 00000000000..8ebbcc03d51
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/11.png differ
diff --git a/_static/img/torchserve-ipex-images-2/12.png b/_static/img/torchserve-ipex-images-2/12.png
new file mode 100644
index 00000000000..23c4794ae36
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/12.png differ
diff --git a/_static/img/torchserve-ipex-images-2/13.png b/_static/img/torchserve-ipex-images-2/13.png
new file mode 100644
index 00000000000..4e1dc6e1a03
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/13.png differ
diff --git a/_static/img/torchserve-ipex-images-2/14.png b/_static/img/torchserve-ipex-images-2/14.png
new file mode 100644
index 00000000000..701399e9d9b
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/14.png differ
diff --git a/_static/img/torchserve-ipex-images-2/15.png b/_static/img/torchserve-ipex-images-2/15.png
new file mode 100644
index 00000000000..b345a9d0d8c
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/15.png differ
diff --git a/_static/img/torchserve-ipex-images-2/16.png b/_static/img/torchserve-ipex-images-2/16.png
new file mode 100644
index 00000000000..39b5d6afb9c
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/16.png differ
diff --git a/_static/img/torchserve-ipex-images-2/17.png b/_static/img/torchserve-ipex-images-2/17.png
new file mode 100644
index 00000000000..bb7359bcbe6
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/17.png differ
diff --git a/_static/img/torchserve-ipex-images-2/18.png b/_static/img/torchserve-ipex-images-2/18.png
new file mode 100644
index 00000000000..30ad817a561
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/18.png differ
diff --git a/_static/img/torchserve-ipex-images-2/19.png b/_static/img/torchserve-ipex-images-2/19.png
new file mode 100644
index 00000000000..353bfb897a1
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/19.png differ
diff --git a/_static/img/torchserve-ipex-images-2/2.png b/_static/img/torchserve-ipex-images-2/2.png
new file mode 100644
index 00000000000..d7d351a3e74
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/2.png differ
diff --git a/_static/img/torchserve-ipex-images-2/20.png b/_static/img/torchserve-ipex-images-2/20.png
new file mode 100644
index 00000000000..aa94ff57dce
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/20.png differ
diff --git a/_static/img/torchserve-ipex-images-2/21.png b/_static/img/torchserve-ipex-images-2/21.png
new file mode 100644
index 00000000000..c714adc1453
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/21.png differ
diff --git a/_static/img/torchserve-ipex-images-2/22.png b/_static/img/torchserve-ipex-images-2/22.png
new file mode 100644
index 00000000000..fa7ae84c702
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/22.png differ
diff --git a/_static/img/torchserve-ipex-images-2/23.png b/_static/img/torchserve-ipex-images-2/23.png
new file mode 100644
index 00000000000..fd8a1bf8389
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/23.png differ
diff --git a/_static/img/torchserve-ipex-images-2/24.png b/_static/img/torchserve-ipex-images-2/24.png
new file mode 100644
index 00000000000..6ba858f98f0
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/24.png differ
diff --git a/_static/img/torchserve-ipex-images-2/3.png b/_static/img/torchserve-ipex-images-2/3.png
new file mode 100644
index 00000000000..6ae485bd132
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/3.png differ
diff --git a/_static/img/torchserve-ipex-images-2/4.png b/_static/img/torchserve-ipex-images-2/4.png
new file mode 100644
index 00000000000..b0fa5e68133
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/4.png differ
diff --git a/_static/img/torchserve-ipex-images-2/5.png b/_static/img/torchserve-ipex-images-2/5.png
new file mode 100644
index 00000000000..25adc177ad1
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/5.png differ
diff --git a/_static/img/torchserve-ipex-images-2/6.png b/_static/img/torchserve-ipex-images-2/6.png
new file mode 100644
index 00000000000..739d3b388d3
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/6.png differ
diff --git a/_static/img/torchserve-ipex-images-2/7.png b/_static/img/torchserve-ipex-images-2/7.png
new file mode 100644
index 00000000000..77765616d65
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/7.png differ
diff --git a/_static/img/torchserve-ipex-images-2/8.png b/_static/img/torchserve-ipex-images-2/8.png
new file mode 100644
index 00000000000..b731676cc21
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/8.png differ
diff --git a/_static/img/torchserve-ipex-images-2/9.png b/_static/img/torchserve-ipex-images-2/9.png
new file mode 100644
index 00000000000..9155201ab3c
Binary files /dev/null and b/_static/img/torchserve-ipex-images-2/9.png differ
diff --git a/_static/img/torchserve-ipex-images/1.png b/_static/img/torchserve-ipex-images/1.png
new file mode 100644
index 00000000000..fc8748b22a5
Binary files /dev/null and b/_static/img/torchserve-ipex-images/1.png differ
diff --git a/_static/img/torchserve-ipex-images/10.png b/_static/img/torchserve-ipex-images/10.png
new file mode 100644
index 00000000000..833a1bb7cf9
Binary files /dev/null and b/_static/img/torchserve-ipex-images/10.png differ
diff --git a/_static/img/torchserve-ipex-images/11.gif b/_static/img/torchserve-ipex-images/11.gif
new file mode 100644
index 00000000000..1c1a2644e8e
Binary files /dev/null and b/_static/img/torchserve-ipex-images/11.gif differ
diff --git a/_static/img/torchserve-ipex-images/12.png b/_static/img/torchserve-ipex-images/12.png
new file mode 100644
index 00000000000..b55968fd705
Binary files /dev/null and b/_static/img/torchserve-ipex-images/12.png differ
diff --git a/_static/img/torchserve-ipex-images/13.png b/_static/img/torchserve-ipex-images/13.png
new file mode 100644
index 00000000000..de9c08814e6
Binary files /dev/null and b/_static/img/torchserve-ipex-images/13.png differ
diff --git a/_static/img/torchserve-ipex-images/14.png b/_static/img/torchserve-ipex-images/14.png
new file mode 100644
index 00000000000..4d776d81647
Binary files /dev/null and b/_static/img/torchserve-ipex-images/14.png differ
diff --git a/_static/img/torchserve-ipex-images/15.png b/_static/img/torchserve-ipex-images/15.png
new file mode 100644
index 00000000000..513ccf8e053
Binary files /dev/null and b/_static/img/torchserve-ipex-images/15.png differ
diff --git a/_static/img/torchserve-ipex-images/16.png b/_static/img/torchserve-ipex-images/16.png
new file mode 100644
index 00000000000..3670d0a1dc4
Binary files /dev/null and b/_static/img/torchserve-ipex-images/16.png differ
diff --git a/_static/img/torchserve-ipex-images/17.png b/_static/img/torchserve-ipex-images/17.png
new file mode 100644
index 00000000000..5ab17373c95
Binary files /dev/null and b/_static/img/torchserve-ipex-images/17.png differ
diff --git a/_static/img/torchserve-ipex-images/18.png b/_static/img/torchserve-ipex-images/18.png
new file mode 100644
index 00000000000..50304884d3e
Binary files /dev/null and b/_static/img/torchserve-ipex-images/18.png differ
diff --git a/_static/img/torchserve-ipex-images/19.png b/_static/img/torchserve-ipex-images/19.png
new file mode 100644
index 00000000000..b123480530e
Binary files /dev/null and b/_static/img/torchserve-ipex-images/19.png differ
diff --git a/_static/img/torchserve-ipex-images/1_.png b/_static/img/torchserve-ipex-images/1_.png
new file mode 100644
index 00000000000..fc8748b22a5
Binary files /dev/null and b/_static/img/torchserve-ipex-images/1_.png differ
diff --git a/_static/img/torchserve-ipex-images/2.png b/_static/img/torchserve-ipex-images/2.png
new file mode 100644
index 00000000000..27633f25bcb
Binary files /dev/null and b/_static/img/torchserve-ipex-images/2.png differ
diff --git a/_static/img/torchserve-ipex-images/20.gif b/_static/img/torchserve-ipex-images/20.gif
new file mode 100644
index 00000000000..ba8e9e95315
Binary files /dev/null and b/_static/img/torchserve-ipex-images/20.gif differ
diff --git a/_static/img/torchserve-ipex-images/21.png b/_static/img/torchserve-ipex-images/21.png
new file mode 100644
index 00000000000..04b3ca622bf
Binary files /dev/null and b/_static/img/torchserve-ipex-images/21.png differ
diff --git a/_static/img/torchserve-ipex-images/22.png b/_static/img/torchserve-ipex-images/22.png
new file mode 100644
index 00000000000..cbb2c269a90
Binary files /dev/null and b/_static/img/torchserve-ipex-images/22.png differ
diff --git a/_static/img/torchserve-ipex-images/23.png b/_static/img/torchserve-ipex-images/23.png
new file mode 100644
index 00000000000..c9bc44463f3
Binary files /dev/null and b/_static/img/torchserve-ipex-images/23.png differ
diff --git a/_static/img/torchserve-ipex-images/24.png b/_static/img/torchserve-ipex-images/24.png
new file mode 100644
index 00000000000..8b5718c30f3
Binary files /dev/null and b/_static/img/torchserve-ipex-images/24.png differ
diff --git a/_static/img/torchserve-ipex-images/25.png b/_static/img/torchserve-ipex-images/25.png
new file mode 100644
index 00000000000..4de920e632b
Binary files /dev/null and b/_static/img/torchserve-ipex-images/25.png differ
diff --git a/_static/img/torchserve-ipex-images/26.gif b/_static/img/torchserve-ipex-images/26.gif
new file mode 100644
index 00000000000..60a5a64ad15
Binary files /dev/null and b/_static/img/torchserve-ipex-images/26.gif differ
diff --git a/_static/img/torchserve-ipex-images/27.png b/_static/img/torchserve-ipex-images/27.png
new file mode 100644
index 00000000000..c7e766155f5
Binary files /dev/null and b/_static/img/torchserve-ipex-images/27.png differ
diff --git a/_static/img/torchserve-ipex-images/28.png b/_static/img/torchserve-ipex-images/28.png
new file mode 100644
index 00000000000..b7056c4c4ac
Binary files /dev/null and b/_static/img/torchserve-ipex-images/28.png differ
diff --git a/_static/img/torchserve-ipex-images/29.png b/_static/img/torchserve-ipex-images/29.png
new file mode 100644
index 00000000000..9dcd8735111
Binary files /dev/null and b/_static/img/torchserve-ipex-images/29.png differ
diff --git a/_static/img/torchserve-ipex-images/3.png b/_static/img/torchserve-ipex-images/3.png
new file mode 100644
index 00000000000..2309071571c
Binary files /dev/null and b/_static/img/torchserve-ipex-images/3.png differ
diff --git a/_static/img/torchserve-ipex-images/30.png b/_static/img/torchserve-ipex-images/30.png
new file mode 100644
index 00000000000..96b07ec7205
Binary files /dev/null and b/_static/img/torchserve-ipex-images/30.png differ
diff --git a/_static/img/torchserve-ipex-images/31.png b/_static/img/torchserve-ipex-images/31.png
new file mode 100644
index 00000000000..601b63e511d
Binary files /dev/null and b/_static/img/torchserve-ipex-images/31.png differ
diff --git a/_static/img/torchserve-ipex-images/4.png b/_static/img/torchserve-ipex-images/4.png
new file mode 100644
index 00000000000..f12d8c7cc40
Binary files /dev/null and b/_static/img/torchserve-ipex-images/4.png differ
diff --git a/_static/img/torchserve-ipex-images/5.png b/_static/img/torchserve-ipex-images/5.png
new file mode 100644
index 00000000000..55e05e5e53c
Binary files /dev/null and b/_static/img/torchserve-ipex-images/5.png differ
diff --git a/_static/img/torchserve-ipex-images/6.png b/_static/img/torchserve-ipex-images/6.png
new file mode 100644
index 00000000000..59a028f94b9
Binary files /dev/null and b/_static/img/torchserve-ipex-images/6.png differ
diff --git a/_static/img/torchserve-ipex-images/7.png b/_static/img/torchserve-ipex-images/7.png
new file mode 100644
index 00000000000..5739cb4f53a
Binary files /dev/null and b/_static/img/torchserve-ipex-images/7.png differ
diff --git a/_static/img/torchserve-ipex-images/8.png b/_static/img/torchserve-ipex-images/8.png
new file mode 100644
index 00000000000..1e6531b6cab
Binary files /dev/null and b/_static/img/torchserve-ipex-images/8.png differ
diff --git a/_static/img/torchserve-ipex-images/9.gif b/_static/img/torchserve-ipex-images/9.gif
new file mode 100644
index 00000000000..682e2f3425e
Binary files /dev/null and b/_static/img/torchserve-ipex-images/9.gif differ
diff --git a/_static/img/trace_img.png b/_static/img/trace_img.png
new file mode 100644
index 00000000000..8c540ceb519
Binary files /dev/null and b/_static/img/trace_img.png differ
diff --git a/_static/img/trace_xpu_img.png b/_static/img/trace_xpu_img.png
new file mode 100644
index 00000000000..2eca0a78cb6
Binary files /dev/null and b/_static/img/trace_xpu_img.png differ
diff --git a/_static/img/transformer_architecture.jpg b/_static/img/transformer_architecture.jpg
new file mode 100644
index 00000000000..4188fae7c85
Binary files /dev/null and b/_static/img/transformer_architecture.jpg differ
diff --git a/_static/img/transformer_input_target.png b/_static/img/transformer_input_target.png
new file mode 100644
index 00000000000..02e87174762
Binary files /dev/null and b/_static/img/transformer_input_target.png differ
diff --git a/_static/img/tts_pipeline.png b/_static/img/tts_pipeline.png
new file mode 100644
index 00000000000..5dc37ae9ddd
Binary files /dev/null and b/_static/img/tts_pipeline.png differ
diff --git a/_static/img/tv_tutorial/tv_image01.png b/_static/img/tv_tutorial/tv_image01.png
deleted file mode 100644
index bb47d27d24e..00000000000
Binary files a/_static/img/tv_tutorial/tv_image01.png and /dev/null differ
diff --git a/_static/img/tv_tutorial/tv_image02.png b/_static/img/tv_tutorial/tv_image02.png
deleted file mode 100644
index 8717199010b..00000000000
Binary files a/_static/img/tv_tutorial/tv_image02.png and /dev/null differ
diff --git a/_static/img/tv_tutorial/tv_image05.png b/_static/img/tv_tutorial/tv_image05.png
deleted file mode 100644
index 3961033693a..00000000000
Binary files a/_static/img/tv_tutorial/tv_image05.png and /dev/null differ
diff --git a/_static/img/tv_tutorial/tv_image06.png b/_static/img/tv_tutorial/tv_image06.png
deleted file mode 100644
index b885866081a..00000000000
Binary files a/_static/img/tv_tutorial/tv_image06.png and /dev/null differ
diff --git a/_static/img/tv_tutorial/tv_image07.png b/_static/img/tv_tutorial/tv_image07.png
deleted file mode 100644
index e3d88cd5989..00000000000
Binary files a/_static/img/tv_tutorial/tv_image07.png and /dev/null differ
diff --git a/_static/img/understanding_leaf_vs_nonleaf/comp-graph-1.png b/_static/img/understanding_leaf_vs_nonleaf/comp-graph-1.png
new file mode 100644
index 00000000000..1fa3d80d339
Binary files /dev/null and b/_static/img/understanding_leaf_vs_nonleaf/comp-graph-1.png differ
diff --git a/_static/img/understanding_leaf_vs_nonleaf/comp-graph-2.png b/_static/img/understanding_leaf_vs_nonleaf/comp-graph-2.png
new file mode 100644
index 00000000000..3f76deab3bf
Binary files /dev/null and b/_static/img/understanding_leaf_vs_nonleaf/comp-graph-2.png differ
diff --git a/_static/img/usb_semisup_learn/code.png b/_static/img/usb_semisup_learn/code.png
new file mode 100644
index 00000000000..fdc7b798a37
Binary files /dev/null and b/_static/img/usb_semisup_learn/code.png differ
diff --git a/_static/jquery-3.2.1.js b/_static/jquery-3.2.1.js
deleted file mode 100755
index 2cbd2ab50e7..00000000000
--- a/_static/jquery-3.2.1.js
+++ /dev/null
@@ -1,10253 +0,0 @@
-/*!
- * jQuery JavaScript Library v3.2.1
- * https://jquery.com/
- *
- * Includes Sizzle.js
- * https://sizzlejs.com/
- *
- * Copyright JS Foundation and other contributors
- * Released under the MIT license
- * https://jquery.org/license
- *
- * Date: 2017-03-20T18:59Z
- */
-( function( global, factory ) {
-
- "use strict";
-
- if ( typeof module === "object" && typeof module.exports === "object" ) {
-
- // For CommonJS and CommonJS-like environments where a proper `window`
- // is present, execute the factory and get jQuery.
- // For environments that do not have a `window` with a `document`
- // (such as Node.js), expose a factory as module.exports.
- // This accentuates the need for the creation of a real `window`.
- // e.g. var jQuery = require("jquery")(window);
- // See ticket #14549 for more info.
- module.exports = global.document ?
- factory( global, true ) :
- function( w ) {
- if ( !w.document ) {
- throw new Error( "jQuery requires a window with a document" );
- }
- return factory( w );
- };
- } else {
- factory( global );
- }
-
-// Pass this if window is not defined yet
-} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
-
-// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
-// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
-// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
-// enough that all such attempts are guarded in a try block.
-"use strict";
-
-var arr = [];
-
-var document = window.document;
-
-var getProto = Object.getPrototypeOf;
-
-var slice = arr.slice;
-
-var concat = arr.concat;
-
-var push = arr.push;
-
-var indexOf = arr.indexOf;
-
-var class2type = {};
-
-var toString = class2type.toString;
-
-var hasOwn = class2type.hasOwnProperty;
-
-var fnToString = hasOwn.toString;
-
-var ObjectFunctionString = fnToString.call( Object );
-
-var support = {};
-
-
-
- function DOMEval( code, doc ) {
- doc = doc || document;
-
- var script = doc.createElement( "script" );
-
- script.text = code;
- doc.head.appendChild( script ).parentNode.removeChild( script );
- }
-/* global Symbol */
-// Defining this global in .eslintrc.json would create a danger of using the global
-// unguarded in another place, it seems safer to define global only for this module
-
-
-
-var
- version = "3.2.1",
-
- // Define a local copy of jQuery
- jQuery = function( selector, context ) {
-
- // The jQuery object is actually just the init constructor 'enhanced'
- // Need init if jQuery is called (just allow error to be thrown if not included)
- return new jQuery.fn.init( selector, context );
- },
-
- // Support: Android <=4.0 only
- // Make sure we trim BOM and NBSP
- rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,
-
- // Matches dashed string for camelizing
- rmsPrefix = /^-ms-/,
- rdashAlpha = /-([a-z])/g,
-
- // Used by jQuery.camelCase as callback to replace()
- fcamelCase = function( all, letter ) {
- return letter.toUpperCase();
- };
-
-jQuery.fn = jQuery.prototype = {
-
- // The current version of jQuery being used
- jquery: version,
-
- constructor: jQuery,
-
- // The default length of a jQuery object is 0
- length: 0,
-
- toArray: function() {
- return slice.call( this );
- },
-
- // Get the Nth element in the matched element set OR
- // Get the whole matched element set as a clean array
- get: function( num ) {
-
- // Return all the elements in a clean array
- if ( num == null ) {
- return slice.call( this );
- }
-
- // Return just the one element from the set
- return num < 0 ? this[ num + this.length ] : this[ num ];
- },
-
- // Take an array of elements and push it onto the stack
- // (returning the new matched element set)
- pushStack: function( elems ) {
-
- // Build a new jQuery matched element set
- var ret = jQuery.merge( this.constructor(), elems );
-
- // Add the old object onto the stack (as a reference)
- ret.prevObject = this;
-
- // Return the newly-formed element set
- return ret;
- },
-
- // Execute a callback for every element in the matched set.
- each: function( callback ) {
- return jQuery.each( this, callback );
- },
-
- map: function( callback ) {
- return this.pushStack( jQuery.map( this, function( elem, i ) {
- return callback.call( elem, i, elem );
- } ) );
- },
-
- slice: function() {
- return this.pushStack( slice.apply( this, arguments ) );
- },
-
- first: function() {
- return this.eq( 0 );
- },
-
- last: function() {
- return this.eq( -1 );
- },
-
- eq: function( i ) {
- var len = this.length,
- j = +i + ( i < 0 ? len : 0 );
- return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] );
- },
-
- end: function() {
- return this.prevObject || this.constructor();
- },
-
- // For internal use only.
- // Behaves like an Array's method, not like a jQuery method.
- push: push,
- sort: arr.sort,
- splice: arr.splice
-};
-
-jQuery.extend = jQuery.fn.extend = function() {
- var options, name, src, copy, copyIsArray, clone,
- target = arguments[ 0 ] || {},
- i = 1,
- length = arguments.length,
- deep = false;
-
- // Handle a deep copy situation
- if ( typeof target === "boolean" ) {
- deep = target;
-
- // Skip the boolean and the target
- target = arguments[ i ] || {};
- i++;
- }
-
- // Handle case when target is a string or something (possible in deep copy)
- if ( typeof target !== "object" && !jQuery.isFunction( target ) ) {
- target = {};
- }
-
- // Extend jQuery itself if only one argument is passed
- if ( i === length ) {
- target = this;
- i--;
- }
-
- for ( ; i < length; i++ ) {
-
- // Only deal with non-null/undefined values
- if ( ( options = arguments[ i ] ) != null ) {
-
- // Extend the base object
- for ( name in options ) {
- src = target[ name ];
- copy = options[ name ];
-
- // Prevent never-ending loop
- if ( target === copy ) {
- continue;
- }
-
- // Recurse if we're merging plain objects or arrays
- if ( deep && copy && ( jQuery.isPlainObject( copy ) ||
- ( copyIsArray = Array.isArray( copy ) ) ) ) {
-
- if ( copyIsArray ) {
- copyIsArray = false;
- clone = src && Array.isArray( src ) ? src : [];
-
- } else {
- clone = src && jQuery.isPlainObject( src ) ? src : {};
- }
-
- // Never move original objects, clone them
- target[ name ] = jQuery.extend( deep, clone, copy );
-
- // Don't bring in undefined values
- } else if ( copy !== undefined ) {
- target[ name ] = copy;
- }
- }
- }
- }
-
- // Return the modified object
- return target;
-};
-
-jQuery.extend( {
-
- // Unique for each copy of jQuery on the page
- expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ),
-
- // Assume jQuery is ready without the ready module
- isReady: true,
-
- error: function( msg ) {
- throw new Error( msg );
- },
-
- noop: function() {},
-
- isFunction: function( obj ) {
- return jQuery.type( obj ) === "function";
- },
-
- isWindow: function( obj ) {
- return obj != null && obj === obj.window;
- },
-
- isNumeric: function( obj ) {
-
- // As of jQuery 3.0, isNumeric is limited to
- // strings and numbers (primitives or objects)
- // that can be coerced to finite numbers (gh-2662)
- var type = jQuery.type( obj );
- return ( type === "number" || type === "string" ) &&
-
- // parseFloat NaNs numeric-cast false positives ("")
- // ...but misinterprets leading-number strings, particularly hex literals ("0x...")
- // subtraction forces infinities to NaN
- !isNaN( obj - parseFloat( obj ) );
- },
-
- isPlainObject: function( obj ) {
- var proto, Ctor;
-
- // Detect obvious negatives
- // Use toString instead of jQuery.type to catch host objects
- if ( !obj || toString.call( obj ) !== "[object Object]" ) {
- return false;
- }
-
- proto = getProto( obj );
-
- // Objects with no prototype (e.g., `Object.create( null )`) are plain
- if ( !proto ) {
- return true;
- }
-
- // Objects with prototype are plain iff they were constructed by a global Object function
- Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor;
- return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString;
- },
-
- isEmptyObject: function( obj ) {
-
- /* eslint-disable no-unused-vars */
- // See https://github.com/eslint/eslint/issues/6125
- var name;
-
- for ( name in obj ) {
- return false;
- }
- return true;
- },
-
- type: function( obj ) {
- if ( obj == null ) {
- return obj + "";
- }
-
- // Support: Android <=2.3 only (functionish RegExp)
- return typeof obj === "object" || typeof obj === "function" ?
- class2type[ toString.call( obj ) ] || "object" :
- typeof obj;
- },
-
- // Evaluates a script in a global context
- globalEval: function( code ) {
- DOMEval( code );
- },
-
- // Convert dashed to camelCase; used by the css and data modules
- // Support: IE <=9 - 11, Edge 12 - 13
- // Microsoft forgot to hump their vendor prefix (#9572)
- camelCase: function( string ) {
- return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
- },
-
- each: function( obj, callback ) {
- var length, i = 0;
-
- if ( isArrayLike( obj ) ) {
- length = obj.length;
- for ( ; i < length; i++ ) {
- if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
- break;
- }
- }
- } else {
- for ( i in obj ) {
- if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
- break;
- }
- }
- }
-
- return obj;
- },
-
- // Support: Android <=4.0 only
- trim: function( text ) {
- return text == null ?
- "" :
- ( text + "" ).replace( rtrim, "" );
- },
-
- // results is for internal usage only
- makeArray: function( arr, results ) {
- var ret = results || [];
-
- if ( arr != null ) {
- if ( isArrayLike( Object( arr ) ) ) {
- jQuery.merge( ret,
- typeof arr === "string" ?
- [ arr ] : arr
- );
- } else {
- push.call( ret, arr );
- }
- }
-
- return ret;
- },
-
- inArray: function( elem, arr, i ) {
- return arr == null ? -1 : indexOf.call( arr, elem, i );
- },
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- merge: function( first, second ) {
- var len = +second.length,
- j = 0,
- i = first.length;
-
- for ( ; j < len; j++ ) {
- first[ i++ ] = second[ j ];
- }
-
- first.length = i;
-
- return first;
- },
-
- grep: function( elems, callback, invert ) {
- var callbackInverse,
- matches = [],
- i = 0,
- length = elems.length,
- callbackExpect = !invert;
-
- // Go through the array, only saving the items
- // that pass the validator function
- for ( ; i < length; i++ ) {
- callbackInverse = !callback( elems[ i ], i );
- if ( callbackInverse !== callbackExpect ) {
- matches.push( elems[ i ] );
- }
- }
-
- return matches;
- },
-
- // arg is for internal usage only
- map: function( elems, callback, arg ) {
- var length, value,
- i = 0,
- ret = [];
-
- // Go through the array, translating each of the items to their new values
- if ( isArrayLike( elems ) ) {
- length = elems.length;
- for ( ; i < length; i++ ) {
- value = callback( elems[ i ], i, arg );
-
- if ( value != null ) {
- ret.push( value );
- }
- }
-
- // Go through every key on the object,
- } else {
- for ( i in elems ) {
- value = callback( elems[ i ], i, arg );
-
- if ( value != null ) {
- ret.push( value );
- }
- }
- }
-
- // Flatten any nested arrays
- return concat.apply( [], ret );
- },
-
- // A global GUID counter for objects
- guid: 1,
-
- // Bind a function to a context, optionally partially applying any
- // arguments.
- proxy: function( fn, context ) {
- var tmp, args, proxy;
-
- if ( typeof context === "string" ) {
- tmp = fn[ context ];
- context = fn;
- fn = tmp;
- }
-
- // Quick check to determine if target is callable, in the spec
- // this throws a TypeError, but we will just return undefined.
- if ( !jQuery.isFunction( fn ) ) {
- return undefined;
- }
-
- // Simulated bind
- args = slice.call( arguments, 2 );
- proxy = function() {
- return fn.apply( context || this, args.concat( slice.call( arguments ) ) );
- };
-
- // Set the guid of unique handler to the same of original handler, so it can be removed
- proxy.guid = fn.guid = fn.guid || jQuery.guid++;
-
- return proxy;
- },
-
- now: Date.now,
-
- // jQuery.support is not used in Core but other projects attach their
- // properties to it so it needs to exist.
- support: support
-} );
-
-if ( typeof Symbol === "function" ) {
- jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ];
-}
-
-// Populate the class2type map
-jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ),
-function( i, name ) {
- class2type[ "[object " + name + "]" ] = name.toLowerCase();
-} );
-
-function isArrayLike( obj ) {
-
- // Support: real iOS 8.2 only (not reproducible in simulator)
- // `in` check used to prevent JIT error (gh-2145)
- // hasOwn isn't used here due to false negatives
- // regarding Nodelist length in IE
- var length = !!obj && "length" in obj && obj.length,
- type = jQuery.type( obj );
-
- if ( type === "function" || jQuery.isWindow( obj ) ) {
- return false;
- }
-
- return type === "array" || length === 0 ||
- typeof length === "number" && length > 0 && ( length - 1 ) in obj;
-}
-var Sizzle =
-/*!
- * Sizzle CSS Selector Engine v2.3.3
- * https://sizzlejs.com/
- *
- * Copyright jQuery Foundation and other contributors
- * Released under the MIT license
- * https://jquery.org/license
- *
- * Date: 2016-08-08
- */
-(function( window ) {
-
-var i,
- support,
- Expr,
- getText,
- isXML,
- tokenize,
- compile,
- select,
- outermostContext,
- sortInput,
- hasDuplicate,
-
- // Local document vars
- setDocument,
- document,
- docElem,
- documentIsHTML,
- rbuggyQSA,
- rbuggyMatches,
- matches,
- contains,
-
- // Instance-specific data
- expando = "sizzle" + 1 * new Date(),
- preferredDoc = window.document,
- dirruns = 0,
- done = 0,
- classCache = createCache(),
- tokenCache = createCache(),
- compilerCache = createCache(),
- sortOrder = function( a, b ) {
- if ( a === b ) {
- hasDuplicate = true;
- }
- return 0;
- },
-
- // Instance methods
- hasOwn = ({}).hasOwnProperty,
- arr = [],
- pop = arr.pop,
- push_native = arr.push,
- push = arr.push,
- slice = arr.slice,
- // Use a stripped-down indexOf as it's faster than native
- // https://jsperf.com/thor-indexof-vs-for/5
- indexOf = function( list, elem ) {
- var i = 0,
- len = list.length;
- for ( ; i < len; i++ ) {
- if ( list[i] === elem ) {
- return i;
- }
- }
- return -1;
- },
-
- booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",
-
- // Regular expressions
-
- // https://www.w3.org/TR/css3-selectors/#whitespace
- whitespace = "[\\x20\\t\\r\\n\\f]",
-
- // https://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
- identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+",
-
- // Attribute selectors: https://www.w3.org/TR/selectors/#attribute-selectors
- attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace +
- // Operator (capture 2)
- "*([*^$|!~]?=)" + whitespace +
- // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]"
- "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace +
- "*\\]",
-
- pseudos = ":(" + identifier + ")(?:\\((" +
- // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments:
- // 1. quoted (capture 3; capture 4 or capture 5)
- "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" +
- // 2. simple (capture 6)
- "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" +
- // 3. anything else (capture 2)
- ".*" +
- ")\\)|)",
-
- // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
- rwhitespace = new RegExp( whitespace + "+", "g" ),
- rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ),
-
- rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
- rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ),
-
- rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ),
-
- rpseudo = new RegExp( pseudos ),
- ridentifier = new RegExp( "^" + identifier + "$" ),
-
- matchExpr = {
- "ID": new RegExp( "^#(" + identifier + ")" ),
- "CLASS": new RegExp( "^\\.(" + identifier + ")" ),
- "TAG": new RegExp( "^(" + identifier + "|[*])" ),
- "ATTR": new RegExp( "^" + attributes ),
- "PSEUDO": new RegExp( "^" + pseudos ),
- "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace +
- "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace +
- "*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
- "bool": new RegExp( "^(?:" + booleans + ")$", "i" ),
- // For use in libraries implementing .is()
- // We use this for POS matching in `select`
- "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" +
- whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" )
- },
-
- rinputs = /^(?:input|select|textarea|button)$/i,
- rheader = /^h\d$/i,
-
- rnative = /^[^{]+\{\s*\[native \w/,
-
- // Easily-parseable/retrievable ID or TAG or CLASS selectors
- rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,
-
- rsibling = /[+~]/,
-
- // CSS escapes
- // https://www.w3.org/TR/CSS21/syndata.html#escaped-characters
- runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ),
- funescape = function( _, escaped, escapedWhitespace ) {
- var high = "0x" + escaped - 0x10000;
- // NaN means non-codepoint
- // Support: Firefox<24
- // Workaround erroneous numeric interpretation of +"0x"
- return high !== high || escapedWhitespace ?
- escaped :
- high < 0 ?
- // BMP codepoint
- String.fromCharCode( high + 0x10000 ) :
- // Supplemental Plane codepoint (surrogate pair)
- String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 );
- },
-
- // CSS string/identifier serialization
- // https://drafts.csswg.org/cssom/#common-serializing-idioms
- rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,
- fcssescape = function( ch, asCodePoint ) {
- if ( asCodePoint ) {
-
- // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
- if ( ch === "\0" ) {
- return "\uFFFD";
- }
-
- // Control characters and (dependent upon position) numbers get escaped as code points
- return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
- }
-
- // Other potentially-special ASCII characters get backslash-escaped
- return "\\" + ch;
- },
-
- // Used for iframes
- // See setDocument()
- // Removing the function wrapper causes a "Permission Denied"
- // error in IE
- unloadHandler = function() {
- setDocument();
- },
-
- disabledAncestor = addCombinator(
- function( elem ) {
- return elem.disabled === true && ("form" in elem || "label" in elem);
- },
- { dir: "parentNode", next: "legend" }
- );
-
-// Optimize for push.apply( _, NodeList )
-try {
- push.apply(
- (arr = slice.call( preferredDoc.childNodes )),
- preferredDoc.childNodes
- );
- // Support: Android<4.0
- // Detect silently failing push.apply
- arr[ preferredDoc.childNodes.length ].nodeType;
-} catch ( e ) {
- push = { apply: arr.length ?
-
- // Leverage slice if possible
- function( target, els ) {
- push_native.apply( target, slice.call(els) );
- } :
-
- // Support: IE<9
- // Otherwise append directly
- function( target, els ) {
- var j = target.length,
- i = 0;
- // Can't trust NodeList.length
- while ( (target[j++] = els[i++]) ) {}
- target.length = j - 1;
- }
- };
-}
-
-function Sizzle( selector, context, results, seed ) {
- var m, i, elem, nid, match, groups, newSelector,
- newContext = context && context.ownerDocument,
-
- // nodeType defaults to 9, since context defaults to document
- nodeType = context ? context.nodeType : 9;
-
- results = results || [];
-
- // Return early from calls with invalid selector or context
- if ( typeof selector !== "string" || !selector ||
- nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) {
-
- return results;
- }
-
- // Try to shortcut find operations (as opposed to filters) in HTML documents
- if ( !seed ) {
-
- if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) {
- setDocument( context );
- }
- context = context || document;
-
- if ( documentIsHTML ) {
-
- // If the selector is sufficiently simple, try using a "get*By*" DOM method
- // (excepting DocumentFragment context, where the methods don't exist)
- if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) {
-
- // ID selector
- if ( (m = match[1]) ) {
-
- // Document context
- if ( nodeType === 9 ) {
- if ( (elem = context.getElementById( m )) ) {
-
- // Support: IE, Opera, Webkit
- // TODO: identify versions
- // getElementById can match elements by name instead of ID
- if ( elem.id === m ) {
- results.push( elem );
- return results;
- }
- } else {
- return results;
- }
-
- // Element context
- } else {
-
- // Support: IE, Opera, Webkit
- // TODO: identify versions
- // getElementById can match elements by name instead of ID
- if ( newContext && (elem = newContext.getElementById( m )) &&
- contains( context, elem ) &&
- elem.id === m ) {
-
- results.push( elem );
- return results;
- }
- }
-
- // Type selector
- } else if ( match[2] ) {
- push.apply( results, context.getElementsByTagName( selector ) );
- return results;
-
- // Class selector
- } else if ( (m = match[3]) && support.getElementsByClassName &&
- context.getElementsByClassName ) {
-
- push.apply( results, context.getElementsByClassName( m ) );
- return results;
- }
- }
-
- // Take advantage of querySelectorAll
- if ( support.qsa &&
- !compilerCache[ selector + " " ] &&
- (!rbuggyQSA || !rbuggyQSA.test( selector )) ) {
-
- if ( nodeType !== 1 ) {
- newContext = context;
- newSelector = selector;
-
- // qSA looks outside Element context, which is not what we want
- // Thanks to Andrew Dupont for this workaround technique
- // Support: IE <=8
- // Exclude object elements
- } else if ( context.nodeName.toLowerCase() !== "object" ) {
-
- // Capture the context ID, setting it first if necessary
- if ( (nid = context.getAttribute( "id" )) ) {
- nid = nid.replace( rcssescape, fcssescape );
- } else {
- context.setAttribute( "id", (nid = expando) );
- }
-
- // Prefix every selector in the list
- groups = tokenize( selector );
- i = groups.length;
- while ( i-- ) {
- groups[i] = "#" + nid + " " + toSelector( groups[i] );
- }
- newSelector = groups.join( "," );
-
- // Expand context for sibling selectors
- newContext = rsibling.test( selector ) && testContext( context.parentNode ) ||
- context;
- }
-
- if ( newSelector ) {
- try {
- push.apply( results,
- newContext.querySelectorAll( newSelector )
- );
- return results;
- } catch ( qsaError ) {
- } finally {
- if ( nid === expando ) {
- context.removeAttribute( "id" );
- }
- }
- }
- }
- }
- }
-
- // All others
- return select( selector.replace( rtrim, "$1" ), context, results, seed );
-}
-
-/**
- * Create key-value caches of limited size
- * @returns {function(string, object)} Returns the Object data after storing it on itself with
- * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength)
- * deleting the oldest entry
- */
-function createCache() {
- var keys = [];
-
- function cache( key, value ) {
- // Use (key + " ") to avoid collision with native prototype properties (see Issue #157)
- if ( keys.push( key + " " ) > Expr.cacheLength ) {
- // Only keep the most recent entries
- delete cache[ keys.shift() ];
- }
- return (cache[ key + " " ] = value);
- }
- return cache;
-}
-
-/**
- * Mark a function for special use by Sizzle
- * @param {Function} fn The function to mark
- */
-function markFunction( fn ) {
- fn[ expando ] = true;
- return fn;
-}
-
-/**
- * Support testing using an element
- * @param {Function} fn Passed the created element and returns a boolean result
- */
-function assert( fn ) {
- var el = document.createElement("fieldset");
-
- try {
- return !!fn( el );
- } catch (e) {
- return false;
- } finally {
- // Remove from its parent by default
- if ( el.parentNode ) {
- el.parentNode.removeChild( el );
- }
- // release memory in IE
- el = null;
- }
-}
-
-/**
- * Adds the same handler for all of the specified attrs
- * @param {String} attrs Pipe-separated list of attributes
- * @param {Function} handler The method that will be applied
- */
-function addHandle( attrs, handler ) {
- var arr = attrs.split("|"),
- i = arr.length;
-
- while ( i-- ) {
- Expr.attrHandle[ arr[i] ] = handler;
- }
-}
-
-/**
- * Checks document order of two siblings
- * @param {Element} a
- * @param {Element} b
- * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b
- */
-function siblingCheck( a, b ) {
- var cur = b && a,
- diff = cur && a.nodeType === 1 && b.nodeType === 1 &&
- a.sourceIndex - b.sourceIndex;
-
- // Use IE sourceIndex if available on both nodes
- if ( diff ) {
- return diff;
- }
-
- // Check if b follows a
- if ( cur ) {
- while ( (cur = cur.nextSibling) ) {
- if ( cur === b ) {
- return -1;
- }
- }
- }
-
- return a ? 1 : -1;
-}
-
-/**
- * Returns a function to use in pseudos for input types
- * @param {String} type
- */
-function createInputPseudo( type ) {
- return function( elem ) {
- var name = elem.nodeName.toLowerCase();
- return name === "input" && elem.type === type;
- };
-}
-
-/**
- * Returns a function to use in pseudos for buttons
- * @param {String} type
- */
-function createButtonPseudo( type ) {
- return function( elem ) {
- var name = elem.nodeName.toLowerCase();
- return (name === "input" || name === "button") && elem.type === type;
- };
-}
-
-/**
- * Returns a function to use in pseudos for :enabled/:disabled
- * @param {Boolean} disabled true for :disabled; false for :enabled
- */
-function createDisabledPseudo( disabled ) {
-
- // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable
- return function( elem ) {
-
- // Only certain elements can match :enabled or :disabled
- // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled
- // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled
- if ( "form" in elem ) {
-
- // Check for inherited disabledness on relevant non-disabled elements:
- // * listed form-associated elements in a disabled fieldset
- // https://html.spec.whatwg.org/multipage/forms.html#category-listed
- // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled
- // * option elements in a disabled optgroup
- // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled
- // All such elements have a "form" property.
- if ( elem.parentNode && elem.disabled === false ) {
-
- // Option elements defer to a parent optgroup if present
- if ( "label" in elem ) {
- if ( "label" in elem.parentNode ) {
- return elem.parentNode.disabled === disabled;
- } else {
- return elem.disabled === disabled;
- }
- }
-
- // Support: IE 6 - 11
- // Use the isDisabled shortcut property to check for disabled fieldset ancestors
- return elem.isDisabled === disabled ||
-
- // Where there is no isDisabled, check manually
- /* jshint -W018 */
- elem.isDisabled !== !disabled &&
- disabledAncestor( elem ) === disabled;
- }
-
- return elem.disabled === disabled;
-
- // Try to winnow out elements that can't be disabled before trusting the disabled property.
- // Some victims get caught in our net (label, legend, menu, track), but it shouldn't
- // even exist on them, let alone have a boolean value.
- } else if ( "label" in elem ) {
- return elem.disabled === disabled;
- }
-
- // Remaining elements are neither :enabled nor :disabled
- return false;
- };
-}
-
-/**
- * Returns a function to use in pseudos for positionals
- * @param {Function} fn
- */
-function createPositionalPseudo( fn ) {
- return markFunction(function( argument ) {
- argument = +argument;
- return markFunction(function( seed, matches ) {
- var j,
- matchIndexes = fn( [], seed.length, argument ),
- i = matchIndexes.length;
-
- // Match elements found at the specified indexes
- while ( i-- ) {
- if ( seed[ (j = matchIndexes[i]) ] ) {
- seed[j] = !(matches[j] = seed[j]);
- }
- }
- });
- });
-}
-
-/**
- * Checks a node for validity as a Sizzle context
- * @param {Element|Object=} context
- * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value
- */
-function testContext( context ) {
- return context && typeof context.getElementsByTagName !== "undefined" && context;
-}
-
-// Expose support vars for convenience
-support = Sizzle.support = {};
-
-/**
- * Detects XML nodes
- * @param {Element|Object} elem An element or a document
- * @returns {Boolean} True iff elem is a non-HTML XML node
- */
-isXML = Sizzle.isXML = function( elem ) {
- // documentElement is verified for cases where it doesn't yet exist
- // (such as loading iframes in IE - #4833)
- var documentElement = elem && (elem.ownerDocument || elem).documentElement;
- return documentElement ? documentElement.nodeName !== "HTML" : false;
-};
-
-/**
- * Sets document-related variables once based on the current document
- * @param {Element|Object} [doc] An element or document object to use to set the document
- * @returns {Object} Returns the current document
- */
-setDocument = Sizzle.setDocument = function( node ) {
- var hasCompare, subWindow,
- doc = node ? node.ownerDocument || node : preferredDoc;
-
- // Return early if doc is invalid or already selected
- if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) {
- return document;
- }
-
- // Update global variables
- document = doc;
- docElem = document.documentElement;
- documentIsHTML = !isXML( document );
-
- // Support: IE 9-11, Edge
- // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936)
- if ( preferredDoc !== document &&
- (subWindow = document.defaultView) && subWindow.top !== subWindow ) {
-
- // Support: IE 11, Edge
- if ( subWindow.addEventListener ) {
- subWindow.addEventListener( "unload", unloadHandler, false );
-
- // Support: IE 9 - 10 only
- } else if ( subWindow.attachEvent ) {
- subWindow.attachEvent( "onunload", unloadHandler );
- }
- }
-
- /* Attributes
- ---------------------------------------------------------------------- */
-
- // Support: IE<8
- // Verify that getAttribute really returns attributes and not properties
- // (excepting IE8 booleans)
- support.attributes = assert(function( el ) {
- el.className = "i";
- return !el.getAttribute("className");
- });
-
- /* getElement(s)By*
- ---------------------------------------------------------------------- */
-
- // Check if getElementsByTagName("*") returns only elements
- support.getElementsByTagName = assert(function( el ) {
- el.appendChild( document.createComment("") );
- return !el.getElementsByTagName("*").length;
- });
-
- // Support: IE<9
- support.getElementsByClassName = rnative.test( document.getElementsByClassName );
-
- // Support: IE<10
- // Check if getElementById returns elements by name
- // The broken getElementById methods don't pick up programmatically-set names,
- // so use a roundabout getElementsByName test
- support.getById = assert(function( el ) {
- docElem.appendChild( el ).id = expando;
- return !document.getElementsByName || !document.getElementsByName( expando ).length;
- });
-
- // ID filter and find
- if ( support.getById ) {
- Expr.filter["ID"] = function( id ) {
- var attrId = id.replace( runescape, funescape );
- return function( elem ) {
- return elem.getAttribute("id") === attrId;
- };
- };
- Expr.find["ID"] = function( id, context ) {
- if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
- var elem = context.getElementById( id );
- return elem ? [ elem ] : [];
- }
- };
- } else {
- Expr.filter["ID"] = function( id ) {
- var attrId = id.replace( runescape, funescape );
- return function( elem ) {
- var node = typeof elem.getAttributeNode !== "undefined" &&
- elem.getAttributeNode("id");
- return node && node.value === attrId;
- };
- };
-
- // Support: IE 6 - 7 only
- // getElementById is not reliable as a find shortcut
- Expr.find["ID"] = function( id, context ) {
- if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
- var node, i, elems,
- elem = context.getElementById( id );
-
- if ( elem ) {
-
- // Verify the id attribute
- node = elem.getAttributeNode("id");
- if ( node && node.value === id ) {
- return [ elem ];
- }
-
- // Fall back on getElementsByName
- elems = context.getElementsByName( id );
- i = 0;
- while ( (elem = elems[i++]) ) {
- node = elem.getAttributeNode("id");
- if ( node && node.value === id ) {
- return [ elem ];
- }
- }
- }
-
- return [];
- }
- };
- }
-
- // Tag
- Expr.find["TAG"] = support.getElementsByTagName ?
- function( tag, context ) {
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- return context.getElementsByTagName( tag );
-
- // DocumentFragment nodes don't have gEBTN
- } else if ( support.qsa ) {
- return context.querySelectorAll( tag );
- }
- } :
-
- function( tag, context ) {
- var elem,
- tmp = [],
- i = 0,
- // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
- results = context.getElementsByTagName( tag );
-
- // Filter out possible comments
- if ( tag === "*" ) {
- while ( (elem = results[i++]) ) {
- if ( elem.nodeType === 1 ) {
- tmp.push( elem );
- }
- }
-
- return tmp;
- }
- return results;
- };
-
- // Class
- Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
- if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
- return context.getElementsByClassName( className );
- }
- };
-
- /* QSA/matchesSelector
- ---------------------------------------------------------------------- */
-
- // QSA and matchesSelector support
-
- // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
- rbuggyMatches = [];
-
- // qSa(:focus) reports false when true (Chrome 21)
- // We allow this because of a bug in IE8/9 that throws an error
- // whenever `document.activeElement` is accessed on an iframe
- // So, we allow :focus to pass through QSA all the time to avoid the IE error
- // See https://bugs.jquery.com/ticket/13378
- rbuggyQSA = [];
-
- if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
- // Build QSA regex
- // Regex strategy adopted from Diego Perini
- assert(function( el ) {
- // Select is set to empty string on purpose
- // This is to test IE's treatment of not explicitly
- // setting a boolean content attribute,
- // since its presence should be enough
- // https://bugs.jquery.com/ticket/12359
- docElem.appendChild( el ).innerHTML = " " +
- "" +
- " ";
-
- // Support: IE8, Opera 11-12.16
- // Nothing should be selected when empty strings follow ^= or $= or *=
- // The test attribute must be unknown in Opera but "safe" for WinRT
- // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
- if ( el.querySelectorAll("[msallowcapture^='']").length ) {
- rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
- }
-
- // Support: IE8
- // Boolean attributes and "value" are not treated correctly
- if ( !el.querySelectorAll("[selected]").length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
- }
-
- // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
- if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
- rbuggyQSA.push("~=");
- }
-
- // Webkit/Opera - :checked should return selected option elements
- // https://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- // IE8 throws error here and will not see later tests
- if ( !el.querySelectorAll(":checked").length ) {
- rbuggyQSA.push(":checked");
- }
-
- // Support: Safari 8+, iOS 8+
- // https://bugs.webkit.org/show_bug.cgi?id=136851
- // In-page `selector#id sibling-combinator selector` fails
- if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
- rbuggyQSA.push(".#.+[+~]");
- }
- });
-
- assert(function( el ) {
- el.innerHTML = " " +
- " ";
-
- // Support: Windows 8 Native Apps
- // The type and name attributes are restricted during .innerHTML assignment
- var input = document.createElement("input");
- input.setAttribute( "type", "hidden" );
- el.appendChild( input ).setAttribute( "name", "D" );
-
- // Support: IE8
- // Enforce case-sensitivity of name attribute
- if ( el.querySelectorAll("[name=d]").length ) {
- rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
- }
-
- // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
- // IE8 throws error here and will not see later tests
- if ( el.querySelectorAll(":enabled").length !== 2 ) {
- rbuggyQSA.push( ":enabled", ":disabled" );
- }
-
- // Support: IE9-11+
- // IE's :disabled selector does not pick up the children of disabled fieldsets
- docElem.appendChild( el ).disabled = true;
- if ( el.querySelectorAll(":disabled").length !== 2 ) {
- rbuggyQSA.push( ":enabled", ":disabled" );
- }
-
- // Opera 10-11 does not throw on post-comma invalid pseudos
- el.querySelectorAll("*,:x");
- rbuggyQSA.push(",.*:");
- });
- }
-
- if ( (support.matchesSelector = rnative.test( (matches = docElem.matches ||
- docElem.webkitMatchesSelector ||
- docElem.mozMatchesSelector ||
- docElem.oMatchesSelector ||
- docElem.msMatchesSelector) )) ) {
-
- assert(function( el ) {
- // Check to see if it's possible to do matchesSelector
- // on a disconnected node (IE 9)
- support.disconnectedMatch = matches.call( el, "*" );
-
- // This should fail with an exception
- // Gecko does not error, returns false instead
- matches.call( el, "[s!='']:x" );
- rbuggyMatches.push( "!=", pseudos );
- });
- }
-
- rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") );
- rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") );
-
- /* Contains
- ---------------------------------------------------------------------- */
- hasCompare = rnative.test( docElem.compareDocumentPosition );
-
- // Element contains another
- // Purposefully self-exclusive
- // As in, an element does not contain itself
- contains = hasCompare || rnative.test( docElem.contains ) ?
- function( a, b ) {
- var adown = a.nodeType === 9 ? a.documentElement : a,
- bup = b && b.parentNode;
- return a === bup || !!( bup && bup.nodeType === 1 && (
- adown.contains ?
- adown.contains( bup ) :
- a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16
- ));
- } :
- function( a, b ) {
- if ( b ) {
- while ( (b = b.parentNode) ) {
- if ( b === a ) {
- return true;
- }
- }
- }
- return false;
- };
-
- /* Sorting
- ---------------------------------------------------------------------- */
-
- // Document order sorting
- sortOrder = hasCompare ?
- function( a, b ) {
-
- // Flag for duplicate removal
- if ( a === b ) {
- hasDuplicate = true;
- return 0;
- }
-
- // Sort on method existence if only one input has compareDocumentPosition
- var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
- if ( compare ) {
- return compare;
- }
-
- // Calculate position if both inputs belong to the same document
- compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ?
- a.compareDocumentPosition( b ) :
-
- // Otherwise we know they are disconnected
- 1;
-
- // Disconnected nodes
- if ( compare & 1 ||
- (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) {
-
- // Choose the first element that is related to our preferred document
- if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) {
- return -1;
- }
- if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) {
- return 1;
- }
-
- // Maintain original order
- return sortInput ?
- ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
- 0;
- }
-
- return compare & 4 ? -1 : 1;
- } :
- function( a, b ) {
- // Exit early if the nodes are identical
- if ( a === b ) {
- hasDuplicate = true;
- return 0;
- }
-
- var cur,
- i = 0,
- aup = a.parentNode,
- bup = b.parentNode,
- ap = [ a ],
- bp = [ b ];
-
- // Parentless nodes are either documents or disconnected
- if ( !aup || !bup ) {
- return a === document ? -1 :
- b === document ? 1 :
- aup ? -1 :
- bup ? 1 :
- sortInput ?
- ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
- 0;
-
- // If the nodes are siblings, we can do a quick check
- } else if ( aup === bup ) {
- return siblingCheck( a, b );
- }
-
- // Otherwise we need full lists of their ancestors for comparison
- cur = a;
- while ( (cur = cur.parentNode) ) {
- ap.unshift( cur );
- }
- cur = b;
- while ( (cur = cur.parentNode) ) {
- bp.unshift( cur );
- }
-
- // Walk down the tree looking for a discrepancy
- while ( ap[i] === bp[i] ) {
- i++;
- }
-
- return i ?
- // Do a sibling check if the nodes have a common ancestor
- siblingCheck( ap[i], bp[i] ) :
-
- // Otherwise nodes in our document sort first
- ap[i] === preferredDoc ? -1 :
- bp[i] === preferredDoc ? 1 :
- 0;
- };
-
- return document;
-};
-
-Sizzle.matches = function( expr, elements ) {
- return Sizzle( expr, null, null, elements );
-};
-
-Sizzle.matchesSelector = function( elem, expr ) {
- // Set document vars if needed
- if ( ( elem.ownerDocument || elem ) !== document ) {
- setDocument( elem );
- }
-
- // Make sure that attribute selectors are quoted
- expr = expr.replace( rattributeQuotes, "='$1']" );
-
- if ( support.matchesSelector && documentIsHTML &&
- !compilerCache[ expr + " " ] &&
- ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) &&
- ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) {
-
- try {
- var ret = matches.call( elem, expr );
-
- // IE 9's matchesSelector returns false on disconnected nodes
- if ( ret || support.disconnectedMatch ||
- // As well, disconnected nodes are said to be in a document
- // fragment in IE 9
- elem.document && elem.document.nodeType !== 11 ) {
- return ret;
- }
- } catch (e) {}
- }
-
- return Sizzle( expr, document, null, [ elem ] ).length > 0;
-};
-
-Sizzle.contains = function( context, elem ) {
- // Set document vars if needed
- if ( ( context.ownerDocument || context ) !== document ) {
- setDocument( context );
- }
- return contains( context, elem );
-};
-
-Sizzle.attr = function( elem, name ) {
- // Set document vars if needed
- if ( ( elem.ownerDocument || elem ) !== document ) {
- setDocument( elem );
- }
-
- var fn = Expr.attrHandle[ name.toLowerCase() ],
- // Don't get fooled by Object.prototype properties (jQuery #13807)
- val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ?
- fn( elem, name, !documentIsHTML ) :
- undefined;
-
- return val !== undefined ?
- val :
- support.attributes || !documentIsHTML ?
- elem.getAttribute( name ) :
- (val = elem.getAttributeNode(name)) && val.specified ?
- val.value :
- null;
-};
-
-Sizzle.escape = function( sel ) {
- return (sel + "").replace( rcssescape, fcssescape );
-};
-
-Sizzle.error = function( msg ) {
- throw new Error( "Syntax error, unrecognized expression: " + msg );
-};
-
-/**
- * Document sorting and removing duplicates
- * @param {ArrayLike} results
- */
-Sizzle.uniqueSort = function( results ) {
- var elem,
- duplicates = [],
- j = 0,
- i = 0;
-
- // Unless we *know* we can detect duplicates, assume their presence
- hasDuplicate = !support.detectDuplicates;
- sortInput = !support.sortStable && results.slice( 0 );
- results.sort( sortOrder );
-
- if ( hasDuplicate ) {
- while ( (elem = results[i++]) ) {
- if ( elem === results[ i ] ) {
- j = duplicates.push( i );
- }
- }
- while ( j-- ) {
- results.splice( duplicates[ j ], 1 );
- }
- }
-
- // Clear input after sorting to release objects
- // See https://github.com/jquery/sizzle/pull/225
- sortInput = null;
-
- return results;
-};
-
-/**
- * Utility function for retrieving the text value of an array of DOM nodes
- * @param {Array|Element} elem
- */
-getText = Sizzle.getText = function( elem ) {
- var node,
- ret = "",
- i = 0,
- nodeType = elem.nodeType;
-
- if ( !nodeType ) {
- // If no nodeType, this is expected to be an array
- while ( (node = elem[i++]) ) {
- // Do not traverse comment nodes
- ret += getText( node );
- }
- } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
- // Use textContent for elements
- // innerText usage removed for consistency of new lines (jQuery #11153)
- if ( typeof elem.textContent === "string" ) {
- return elem.textContent;
- } else {
- // Traverse its children
- for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
- ret += getText( elem );
- }
- }
- } else if ( nodeType === 3 || nodeType === 4 ) {
- return elem.nodeValue;
- }
- // Do not include comment or processing instruction nodes
-
- return ret;
-};
-
-Expr = Sizzle.selectors = {
-
- // Can be adjusted by the user
- cacheLength: 50,
-
- createPseudo: markFunction,
-
- match: matchExpr,
-
- attrHandle: {},
-
- find: {},
-
- relative: {
- ">": { dir: "parentNode", first: true },
- " ": { dir: "parentNode" },
- "+": { dir: "previousSibling", first: true },
- "~": { dir: "previousSibling" }
- },
-
- preFilter: {
- "ATTR": function( match ) {
- match[1] = match[1].replace( runescape, funescape );
-
- // Move the given value to match[3] whether quoted or unquoted
- match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape );
-
- if ( match[2] === "~=" ) {
- match[3] = " " + match[3] + " ";
- }
-
- return match.slice( 0, 4 );
- },
-
- "CHILD": function( match ) {
- /* matches from matchExpr["CHILD"]
- 1 type (only|nth|...)
- 2 what (child|of-type)
- 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
- 4 xn-component of xn+y argument ([+-]?\d*n|)
- 5 sign of xn-component
- 6 x of xn-component
- 7 sign of y-component
- 8 y of y-component
- */
- match[1] = match[1].toLowerCase();
-
- if ( match[1].slice( 0, 3 ) === "nth" ) {
- // nth-* requires argument
- if ( !match[3] ) {
- Sizzle.error( match[0] );
- }
-
- // numeric x and y parameters for Expr.filter.CHILD
- // remember that false/true cast respectively to 0/1
- match[4] = +( match[4] ? match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) );
- match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" );
-
- // other types prohibit arguments
- } else if ( match[3] ) {
- Sizzle.error( match[0] );
- }
-
- return match;
- },
-
- "PSEUDO": function( match ) {
- var excess,
- unquoted = !match[6] && match[2];
-
- if ( matchExpr["CHILD"].test( match[0] ) ) {
- return null;
- }
-
- // Accept quoted arguments as-is
- if ( match[3] ) {
- match[2] = match[4] || match[5] || "";
-
- // Strip excess characters from unquoted arguments
- } else if ( unquoted && rpseudo.test( unquoted ) &&
- // Get excess from tokenize (recursively)
- (excess = tokenize( unquoted, true )) &&
- // advance to the next closing parenthesis
- (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) {
-
- // excess is a negative index
- match[0] = match[0].slice( 0, excess );
- match[2] = unquoted.slice( 0, excess );
- }
-
- // Return only captures needed by the pseudo filter method (type and argument)
- return match.slice( 0, 3 );
- }
- },
-
- filter: {
-
- "TAG": function( nodeNameSelector ) {
- var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase();
- return nodeNameSelector === "*" ?
- function() { return true; } :
- function( elem ) {
- return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
- };
- },
-
- "CLASS": function( className ) {
- var pattern = classCache[ className + " " ];
-
- return pattern ||
- (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) &&
- classCache( className, function( elem ) {
- return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" );
- });
- },
-
- "ATTR": function( name, operator, check ) {
- return function( elem ) {
- var result = Sizzle.attr( elem, name );
-
- if ( result == null ) {
- return operator === "!=";
- }
- if ( !operator ) {
- return true;
- }
-
- result += "";
-
- return operator === "=" ? result === check :
- operator === "!=" ? result !== check :
- operator === "^=" ? check && result.indexOf( check ) === 0 :
- operator === "*=" ? check && result.indexOf( check ) > -1 :
- operator === "$=" ? check && result.slice( -check.length ) === check :
- operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 :
- operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" :
- false;
- };
- },
-
- "CHILD": function( type, what, argument, first, last ) {
- var simple = type.slice( 0, 3 ) !== "nth",
- forward = type.slice( -4 ) !== "last",
- ofType = what === "of-type";
-
- return first === 1 && last === 0 ?
-
- // Shortcut for :nth-*(n)
- function( elem ) {
- return !!elem.parentNode;
- } :
-
- function( elem, context, xml ) {
- var cache, uniqueCache, outerCache, node, nodeIndex, start,
- dir = simple !== forward ? "nextSibling" : "previousSibling",
- parent = elem.parentNode,
- name = ofType && elem.nodeName.toLowerCase(),
- useCache = !xml && !ofType,
- diff = false;
-
- if ( parent ) {
-
- // :(first|last|only)-(child|of-type)
- if ( simple ) {
- while ( dir ) {
- node = elem;
- while ( (node = node[ dir ]) ) {
- if ( ofType ?
- node.nodeName.toLowerCase() === name :
- node.nodeType === 1 ) {
-
- return false;
- }
- }
- // Reverse direction for :only-* (if we haven't yet done so)
- start = dir = type === "only" && !start && "nextSibling";
- }
- return true;
- }
-
- start = [ forward ? parent.firstChild : parent.lastChild ];
-
- // non-xml :nth-child(...) stores cache data on `parent`
- if ( forward && useCache ) {
-
- // Seek `elem` from a previously-cached index
-
- // ...in a gzip-friendly way
- node = parent;
- outerCache = node[ expando ] || (node[ expando ] = {});
-
- // Support: IE <9 only
- // Defend against cloned attroperties (jQuery gh-1709)
- uniqueCache = outerCache[ node.uniqueID ] ||
- (outerCache[ node.uniqueID ] = {});
-
- cache = uniqueCache[ type ] || [];
- nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
- diff = nodeIndex && cache[ 2 ];
- node = nodeIndex && parent.childNodes[ nodeIndex ];
-
- while ( (node = ++nodeIndex && node && node[ dir ] ||
-
- // Fallback to seeking `elem` from the start
- (diff = nodeIndex = 0) || start.pop()) ) {
-
- // When found, cache indexes on `parent` and break
- if ( node.nodeType === 1 && ++diff && node === elem ) {
- uniqueCache[ type ] = [ dirruns, nodeIndex, diff ];
- break;
- }
- }
-
- } else {
- // Use previously-cached element index if available
- if ( useCache ) {
- // ...in a gzip-friendly way
- node = elem;
- outerCache = node[ expando ] || (node[ expando ] = {});
-
- // Support: IE <9 only
- // Defend against cloned attroperties (jQuery gh-1709)
- uniqueCache = outerCache[ node.uniqueID ] ||
- (outerCache[ node.uniqueID ] = {});
-
- cache = uniqueCache[ type ] || [];
- nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
- diff = nodeIndex;
- }
-
- // xml :nth-child(...)
- // or :nth-last-child(...) or :nth(-last)?-of-type(...)
- if ( diff === false ) {
- // Use the same loop as above to seek `elem` from the start
- while ( (node = ++nodeIndex && node && node[ dir ] ||
- (diff = nodeIndex = 0) || start.pop()) ) {
-
- if ( ( ofType ?
- node.nodeName.toLowerCase() === name :
- node.nodeType === 1 ) &&
- ++diff ) {
-
- // Cache the index of each encountered element
- if ( useCache ) {
- outerCache = node[ expando ] || (node[ expando ] = {});
-
- // Support: IE <9 only
- // Defend against cloned attroperties (jQuery gh-1709)
- uniqueCache = outerCache[ node.uniqueID ] ||
- (outerCache[ node.uniqueID ] = {});
-
- uniqueCache[ type ] = [ dirruns, diff ];
- }
-
- if ( node === elem ) {
- break;
- }
- }
- }
- }
- }
-
- // Incorporate the offset, then check against cycle size
- diff -= last;
- return diff === first || ( diff % first === 0 && diff / first >= 0 );
- }
- };
- },
-
- "PSEUDO": function( pseudo, argument ) {
- // pseudo-class names are case-insensitive
- // https://www.w3.org/TR/selectors/#pseudo-classes
- // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
- // Remember that setFilters inherits from pseudos
- var args,
- fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
- Sizzle.error( "unsupported pseudo: " + pseudo );
-
- // The user may use createPseudo to indicate that
- // arguments are needed to create the filter function
- // just as Sizzle does
- if ( fn[ expando ] ) {
- return fn( argument );
- }
-
- // But maintain support for old signatures
- if ( fn.length > 1 ) {
- args = [ pseudo, pseudo, "", argument ];
- return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
- markFunction(function( seed, matches ) {
- var idx,
- matched = fn( seed, argument ),
- i = matched.length;
- while ( i-- ) {
- idx = indexOf( seed, matched[i] );
- seed[ idx ] = !( matches[ idx ] = matched[i] );
- }
- }) :
- function( elem ) {
- return fn( elem, 0, args );
- };
- }
-
- return fn;
- }
- },
-
- pseudos: {
- // Potentially complex pseudos
- "not": markFunction(function( selector ) {
- // Trim the selector passed to compile
- // to avoid treating leading and trailing
- // spaces as combinators
- var input = [],
- results = [],
- matcher = compile( selector.replace( rtrim, "$1" ) );
-
- return matcher[ expando ] ?
- markFunction(function( seed, matches, context, xml ) {
- var elem,
- unmatched = matcher( seed, null, xml, [] ),
- i = seed.length;
-
- // Match elements unmatched by `matcher`
- while ( i-- ) {
- if ( (elem = unmatched[i]) ) {
- seed[i] = !(matches[i] = elem);
- }
- }
- }) :
- function( elem, context, xml ) {
- input[0] = elem;
- matcher( input, null, xml, results );
- // Don't keep the element (issue #299)
- input[0] = null;
- return !results.pop();
- };
- }),
-
- "has": markFunction(function( selector ) {
- return function( elem ) {
- return Sizzle( selector, elem ).length > 0;
- };
- }),
-
- "contains": markFunction(function( text ) {
- text = text.replace( runescape, funescape );
- return function( elem ) {
- return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1;
- };
- }),
-
- // "Whether an element is represented by a :lang() selector
- // is based solely on the element's language value
- // being equal to the identifier C,
- // or beginning with the identifier C immediately followed by "-".
- // The matching of C against the element's language value is performed case-insensitively.
- // The identifier C does not have to be a valid language name."
- // https://www.w3.org/TR/selectors/#lang-pseudo
- "lang": markFunction( function( lang ) {
- // lang value must be a valid identifier
- if ( !ridentifier.test(lang || "") ) {
- Sizzle.error( "unsupported lang: " + lang );
- }
- lang = lang.replace( runescape, funescape ).toLowerCase();
- return function( elem ) {
- var elemLang;
- do {
- if ( (elemLang = documentIsHTML ?
- elem.lang :
- elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) {
-
- elemLang = elemLang.toLowerCase();
- return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0;
- }
- } while ( (elem = elem.parentNode) && elem.nodeType === 1 );
- return false;
- };
- }),
-
- // Miscellaneous
- "target": function( elem ) {
- var hash = window.location && window.location.hash;
- return hash && hash.slice( 1 ) === elem.id;
- },
-
- "root": function( elem ) {
- return elem === docElem;
- },
-
- "focus": function( elem ) {
- return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex);
- },
-
- // Boolean properties
- "enabled": createDisabledPseudo( false ),
- "disabled": createDisabledPseudo( true ),
-
- "checked": function( elem ) {
- // In CSS3, :checked should return both checked and selected elements
- // https://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- var nodeName = elem.nodeName.toLowerCase();
- return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected);
- },
-
- "selected": function( elem ) {
- // Accessing this property makes selected-by-default
- // options in Safari work properly
- if ( elem.parentNode ) {
- elem.parentNode.selectedIndex;
- }
-
- return elem.selected === true;
- },
-
- // Contents
- "empty": function( elem ) {
- // https://www.w3.org/TR/selectors/#empty-pseudo
- // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5),
- // but not by others (comment: 8; processing instruction: 7; etc.)
- // nodeType < 6 works because attributes (2) do not appear as children
- for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
- if ( elem.nodeType < 6 ) {
- return false;
- }
- }
- return true;
- },
-
- "parent": function( elem ) {
- return !Expr.pseudos["empty"]( elem );
- },
-
- // Element/input types
- "header": function( elem ) {
- return rheader.test( elem.nodeName );
- },
-
- "input": function( elem ) {
- return rinputs.test( elem.nodeName );
- },
-
- "button": function( elem ) {
- var name = elem.nodeName.toLowerCase();
- return name === "input" && elem.type === "button" || name === "button";
- },
-
- "text": function( elem ) {
- var attr;
- return elem.nodeName.toLowerCase() === "input" &&
- elem.type === "text" &&
-
- // Support: IE<8
- // New HTML5 attribute values (e.g., "search") appear with elem.type === "text"
- ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" );
- },
-
- // Position-in-collection
- "first": createPositionalPseudo(function() {
- return [ 0 ];
- }),
-
- "last": createPositionalPseudo(function( matchIndexes, length ) {
- return [ length - 1 ];
- }),
-
- "eq": createPositionalPseudo(function( matchIndexes, length, argument ) {
- return [ argument < 0 ? argument + length : argument ];
- }),
-
- "even": createPositionalPseudo(function( matchIndexes, length ) {
- var i = 0;
- for ( ; i < length; i += 2 ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- }),
-
- "odd": createPositionalPseudo(function( matchIndexes, length ) {
- var i = 1;
- for ( ; i < length; i += 2 ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- }),
-
- "lt": createPositionalPseudo(function( matchIndexes, length, argument ) {
- var i = argument < 0 ? argument + length : argument;
- for ( ; --i >= 0; ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- }),
-
- "gt": createPositionalPseudo(function( matchIndexes, length, argument ) {
- var i = argument < 0 ? argument + length : argument;
- for ( ; ++i < length; ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- })
- }
-};
-
-Expr.pseudos["nth"] = Expr.pseudos["eq"];
-
-// Add button/input type pseudos
-for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) {
- Expr.pseudos[ i ] = createInputPseudo( i );
-}
-for ( i in { submit: true, reset: true } ) {
- Expr.pseudos[ i ] = createButtonPseudo( i );
-}
-
-// Easy API for creating new setFilters
-function setFilters() {}
-setFilters.prototype = Expr.filters = Expr.pseudos;
-Expr.setFilters = new setFilters();
-
-tokenize = Sizzle.tokenize = function( selector, parseOnly ) {
- var matched, match, tokens, type,
- soFar, groups, preFilters,
- cached = tokenCache[ selector + " " ];
-
- if ( cached ) {
- return parseOnly ? 0 : cached.slice( 0 );
- }
-
- soFar = selector;
- groups = [];
- preFilters = Expr.preFilter;
-
- while ( soFar ) {
-
- // Comma and first run
- if ( !matched || (match = rcomma.exec( soFar )) ) {
- if ( match ) {
- // Don't consume trailing commas as valid
- soFar = soFar.slice( match[0].length ) || soFar;
- }
- groups.push( (tokens = []) );
- }
-
- matched = false;
-
- // Combinators
- if ( (match = rcombinators.exec( soFar )) ) {
- matched = match.shift();
- tokens.push({
- value: matched,
- // Cast descendant combinators to space
- type: match[0].replace( rtrim, " " )
- });
- soFar = soFar.slice( matched.length );
- }
-
- // Filters
- for ( type in Expr.filter ) {
- if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] ||
- (match = preFilters[ type ]( match ))) ) {
- matched = match.shift();
- tokens.push({
- value: matched,
- type: type,
- matches: match
- });
- soFar = soFar.slice( matched.length );
- }
- }
-
- if ( !matched ) {
- break;
- }
- }
-
- // Return the length of the invalid excess
- // if we're just parsing
- // Otherwise, throw an error or return tokens
- return parseOnly ?
- soFar.length :
- soFar ?
- Sizzle.error( selector ) :
- // Cache the tokens
- tokenCache( selector, groups ).slice( 0 );
-};
-
-function toSelector( tokens ) {
- var i = 0,
- len = tokens.length,
- selector = "";
- for ( ; i < len; i++ ) {
- selector += tokens[i].value;
- }
- return selector;
-}
-
-function addCombinator( matcher, combinator, base ) {
- var dir = combinator.dir,
- skip = combinator.next,
- key = skip || dir,
- checkNonElements = base && key === "parentNode",
- doneName = done++;
-
- return combinator.first ?
- // Check against closest ancestor/preceding element
- function( elem, context, xml ) {
- while ( (elem = elem[ dir ]) ) {
- if ( elem.nodeType === 1 || checkNonElements ) {
- return matcher( elem, context, xml );
- }
- }
- return false;
- } :
-
- // Check against all ancestor/preceding elements
- function( elem, context, xml ) {
- var oldCache, uniqueCache, outerCache,
- newCache = [ dirruns, doneName ];
-
- // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching
- if ( xml ) {
- while ( (elem = elem[ dir ]) ) {
- if ( elem.nodeType === 1 || checkNonElements ) {
- if ( matcher( elem, context, xml ) ) {
- return true;
- }
- }
- }
- } else {
- while ( (elem = elem[ dir ]) ) {
- if ( elem.nodeType === 1 || checkNonElements ) {
- outerCache = elem[ expando ] || (elem[ expando ] = {});
-
- // Support: IE <9 only
- // Defend against cloned attroperties (jQuery gh-1709)
- uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {});
-
- if ( skip && skip === elem.nodeName.toLowerCase() ) {
- elem = elem[ dir ] || elem;
- } else if ( (oldCache = uniqueCache[ key ]) &&
- oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) {
-
- // Assign to newCache so results back-propagate to previous elements
- return (newCache[ 2 ] = oldCache[ 2 ]);
- } else {
- // Reuse newcache so results back-propagate to previous elements
- uniqueCache[ key ] = newCache;
-
- // A match means we're done; a fail means we have to keep checking
- if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) {
- return true;
- }
- }
- }
- }
- }
- return false;
- };
-}
-
-function elementMatcher( matchers ) {
- return matchers.length > 1 ?
- function( elem, context, xml ) {
- var i = matchers.length;
- while ( i-- ) {
- if ( !matchers[i]( elem, context, xml ) ) {
- return false;
- }
- }
- return true;
- } :
- matchers[0];
-}
-
-function multipleContexts( selector, contexts, results ) {
- var i = 0,
- len = contexts.length;
- for ( ; i < len; i++ ) {
- Sizzle( selector, contexts[i], results );
- }
- return results;
-}
-
-function condense( unmatched, map, filter, context, xml ) {
- var elem,
- newUnmatched = [],
- i = 0,
- len = unmatched.length,
- mapped = map != null;
-
- for ( ; i < len; i++ ) {
- if ( (elem = unmatched[i]) ) {
- if ( !filter || filter( elem, context, xml ) ) {
- newUnmatched.push( elem );
- if ( mapped ) {
- map.push( i );
- }
- }
- }
- }
-
- return newUnmatched;
-}
-
-function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
- if ( postFilter && !postFilter[ expando ] ) {
- postFilter = setMatcher( postFilter );
- }
- if ( postFinder && !postFinder[ expando ] ) {
- postFinder = setMatcher( postFinder, postSelector );
- }
- return markFunction(function( seed, results, context, xml ) {
- var temp, i, elem,
- preMap = [],
- postMap = [],
- preexisting = results.length,
-
- // Get initial elements from seed or context
- elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ),
-
- // Prefilter to get matcher input, preserving a map for seed-results synchronization
- matcherIn = preFilter && ( seed || !selector ) ?
- condense( elems, preMap, preFilter, context, xml ) :
- elems,
-
- matcherOut = matcher ?
- // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
- postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
-
- // ...intermediate processing is necessary
- [] :
-
- // ...otherwise use results directly
- results :
- matcherIn;
-
- // Find primary matches
- if ( matcher ) {
- matcher( matcherIn, matcherOut, context, xml );
- }
-
- // Apply postFilter
- if ( postFilter ) {
- temp = condense( matcherOut, postMap );
- postFilter( temp, [], context, xml );
-
- // Un-match failing elements by moving them back to matcherIn
- i = temp.length;
- while ( i-- ) {
- if ( (elem = temp[i]) ) {
- matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem);
- }
- }
- }
-
- if ( seed ) {
- if ( postFinder || preFilter ) {
- if ( postFinder ) {
- // Get the final matcherOut by condensing this intermediate into postFinder contexts
- temp = [];
- i = matcherOut.length;
- while ( i-- ) {
- if ( (elem = matcherOut[i]) ) {
- // Restore matcherIn since elem is not yet a final match
- temp.push( (matcherIn[i] = elem) );
- }
- }
- postFinder( null, (matcherOut = []), temp, xml );
- }
-
- // Move matched elements from seed to results to keep them synchronized
- i = matcherOut.length;
- while ( i-- ) {
- if ( (elem = matcherOut[i]) &&
- (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) {
-
- seed[temp] = !(results[temp] = elem);
- }
- }
- }
-
- // Add elements to results, through postFinder if defined
- } else {
- matcherOut = condense(
- matcherOut === results ?
- matcherOut.splice( preexisting, matcherOut.length ) :
- matcherOut
- );
- if ( postFinder ) {
- postFinder( null, results, matcherOut, xml );
- } else {
- push.apply( results, matcherOut );
- }
- }
- });
-}
-
-function matcherFromTokens( tokens ) {
- var checkContext, matcher, j,
- len = tokens.length,
- leadingRelative = Expr.relative[ tokens[0].type ],
- implicitRelative = leadingRelative || Expr.relative[" "],
- i = leadingRelative ? 1 : 0,
-
- // The foundational matcher ensures that elements are reachable from top-level context(s)
- matchContext = addCombinator( function( elem ) {
- return elem === checkContext;
- }, implicitRelative, true ),
- matchAnyContext = addCombinator( function( elem ) {
- return indexOf( checkContext, elem ) > -1;
- }, implicitRelative, true ),
- matchers = [ function( elem, context, xml ) {
- var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
- (checkContext = context).nodeType ?
- matchContext( elem, context, xml ) :
- matchAnyContext( elem, context, xml ) );
- // Avoid hanging onto element (issue #299)
- checkContext = null;
- return ret;
- } ];
-
- for ( ; i < len; i++ ) {
- if ( (matcher = Expr.relative[ tokens[i].type ]) ) {
- matchers = [ addCombinator(elementMatcher( matchers ), matcher) ];
- } else {
- matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches );
-
- // Return special upon seeing a positional matcher
- if ( matcher[ expando ] ) {
- // Find the next relative operator (if any) for proper handling
- j = ++i;
- for ( ; j < len; j++ ) {
- if ( Expr.relative[ tokens[j].type ] ) {
- break;
- }
- }
- return setMatcher(
- i > 1 && elementMatcher( matchers ),
- i > 1 && toSelector(
- // If the preceding token was a descendant combinator, insert an implicit any-element `*`
- tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" })
- ).replace( rtrim, "$1" ),
- matcher,
- i < j && matcherFromTokens( tokens.slice( i, j ) ),
- j < len && matcherFromTokens( (tokens = tokens.slice( j )) ),
- j < len && toSelector( tokens )
- );
- }
- matchers.push( matcher );
- }
- }
-
- return elementMatcher( matchers );
-}
-
-function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
- var bySet = setMatchers.length > 0,
- byElement = elementMatchers.length > 0,
- superMatcher = function( seed, context, xml, results, outermost ) {
- var elem, j, matcher,
- matchedCount = 0,
- i = "0",
- unmatched = seed && [],
- setMatched = [],
- contextBackup = outermostContext,
- // We must always have either seed elements or outermost context
- elems = seed || byElement && Expr.find["TAG"]( "*", outermost ),
- // Use integer dirruns iff this is the outermost matcher
- dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1),
- len = elems.length;
-
- if ( outermost ) {
- outermostContext = context === document || context || outermost;
- }
-
- // Add elements passing elementMatchers directly to results
- // Support: IE<9, Safari
- // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id
- for ( ; i !== len && (elem = elems[i]) != null; i++ ) {
- if ( byElement && elem ) {
- j = 0;
- if ( !context && elem.ownerDocument !== document ) {
- setDocument( elem );
- xml = !documentIsHTML;
- }
- while ( (matcher = elementMatchers[j++]) ) {
- if ( matcher( elem, context || document, xml) ) {
- results.push( elem );
- break;
- }
- }
- if ( outermost ) {
- dirruns = dirrunsUnique;
- }
- }
-
- // Track unmatched elements for set filters
- if ( bySet ) {
- // They will have gone through all possible matchers
- if ( (elem = !matcher && elem) ) {
- matchedCount--;
- }
-
- // Lengthen the array for every element, matched or not
- if ( seed ) {
- unmatched.push( elem );
- }
- }
- }
-
- // `i` is now the count of elements visited above, and adding it to `matchedCount`
- // makes the latter nonnegative.
- matchedCount += i;
-
- // Apply set filters to unmatched elements
- // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount`
- // equals `i`), unless we didn't visit _any_ elements in the above loop because we have
- // no element matchers and no seed.
- // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that
- // case, which will result in a "00" `matchedCount` that differs from `i` but is also
- // numerically zero.
- if ( bySet && i !== matchedCount ) {
- j = 0;
- while ( (matcher = setMatchers[j++]) ) {
- matcher( unmatched, setMatched, context, xml );
- }
-
- if ( seed ) {
- // Reintegrate element matches to eliminate the need for sorting
- if ( matchedCount > 0 ) {
- while ( i-- ) {
- if ( !(unmatched[i] || setMatched[i]) ) {
- setMatched[i] = pop.call( results );
- }
- }
- }
-
- // Discard index placeholder values to get only actual matches
- setMatched = condense( setMatched );
- }
-
- // Add matches to results
- push.apply( results, setMatched );
-
- // Seedless set matches succeeding multiple successful matchers stipulate sorting
- if ( outermost && !seed && setMatched.length > 0 &&
- ( matchedCount + setMatchers.length ) > 1 ) {
-
- Sizzle.uniqueSort( results );
- }
- }
-
- // Override manipulation of globals by nested matchers
- if ( outermost ) {
- dirruns = dirrunsUnique;
- outermostContext = contextBackup;
- }
-
- return unmatched;
- };
-
- return bySet ?
- markFunction( superMatcher ) :
- superMatcher;
-}
-
-compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) {
- var i,
- setMatchers = [],
- elementMatchers = [],
- cached = compilerCache[ selector + " " ];
-
- if ( !cached ) {
- // Generate a function of recursive functions that can be used to check each element
- if ( !match ) {
- match = tokenize( selector );
- }
- i = match.length;
- while ( i-- ) {
- cached = matcherFromTokens( match[i] );
- if ( cached[ expando ] ) {
- setMatchers.push( cached );
- } else {
- elementMatchers.push( cached );
- }
- }
-
- // Cache the compiled function
- cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) );
-
- // Save selector and tokenization
- cached.selector = selector;
- }
- return cached;
-};
-
-/**
- * A low-level selection function that works with Sizzle's compiled
- * selector functions
- * @param {String|Function} selector A selector or a pre-compiled
- * selector function built with Sizzle.compile
- * @param {Element} context
- * @param {Array} [results]
- * @param {Array} [seed] A set of elements to match against
- */
-select = Sizzle.select = function( selector, context, results, seed ) {
- var i, tokens, token, type, find,
- compiled = typeof selector === "function" && selector,
- match = !seed && tokenize( (selector = compiled.selector || selector) );
-
- results = results || [];
-
- // Try to minimize operations if there is only one selector in the list and no seed
- // (the latter of which guarantees us context)
- if ( match.length === 1 ) {
-
- // Reduce context if the leading compound selector is an ID
- tokens = match[0] = match[0].slice( 0 );
- if ( tokens.length > 2 && (token = tokens[0]).type === "ID" &&
- context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) {
-
- context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0];
- if ( !context ) {
- return results;
-
- // Precompiled matchers will still verify ancestry, so step up a level
- } else if ( compiled ) {
- context = context.parentNode;
- }
-
- selector = selector.slice( tokens.shift().value.length );
- }
-
- // Fetch a seed set for right-to-left matching
- i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length;
- while ( i-- ) {
- token = tokens[i];
-
- // Abort if we hit a combinator
- if ( Expr.relative[ (type = token.type) ] ) {
- break;
- }
- if ( (find = Expr.find[ type ]) ) {
- // Search, expanding context for leading sibling combinators
- if ( (seed = find(
- token.matches[0].replace( runescape, funescape ),
- rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
- )) ) {
-
- // If seed is empty or no tokens remain, we can return early
- tokens.splice( i, 1 );
- selector = seed.length && toSelector( tokens );
- if ( !selector ) {
- push.apply( results, seed );
- return results;
- }
-
- break;
- }
- }
- }
- }
-
- // Compile and execute a filtering function if one is not provided
- // Provide `match` to avoid retokenization if we modified the selector above
- ( compiled || compile( selector, match ) )(
- seed,
- context,
- !documentIsHTML,
- results,
- !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
- );
- return results;
-};
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert(function( el ) {
- // Should return 1, but returns 4 (following)
- return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
-});
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert(function( el ) {
- el.innerHTML = " ";
- return el.firstChild.getAttribute("href") === "#" ;
-}) ) {
- addHandle( "type|href|height|width", function( elem, name, isXML ) {
- if ( !isXML ) {
- return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
- }
- });
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert(function( el ) {
- el.innerHTML = " ";
- el.firstChild.setAttribute( "value", "" );
- return el.firstChild.getAttribute( "value" ) === "";
-}) ) {
- addHandle( "value", function( elem, name, isXML ) {
- if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
- return elem.defaultValue;
- }
- });
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert(function( el ) {
- return el.getAttribute("disabled") == null;
-}) ) {
- addHandle( booleans, function( elem, name, isXML ) {
- var val;
- if ( !isXML ) {
- return elem[ name ] === true ? name.toLowerCase() :
- (val = elem.getAttributeNode( name )) && val.specified ?
- val.value :
- null;
- }
- });
-}
-
-return Sizzle;
-
-})( window );
-
-
-
-jQuery.find = Sizzle;
-jQuery.expr = Sizzle.selectors;
-
-// Deprecated
-jQuery.expr[ ":" ] = jQuery.expr.pseudos;
-jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort;
-jQuery.text = Sizzle.getText;
-jQuery.isXMLDoc = Sizzle.isXML;
-jQuery.contains = Sizzle.contains;
-jQuery.escapeSelector = Sizzle.escape;
-
-
-
-
-var dir = function( elem, dir, until ) {
- var matched = [],
- truncate = until !== undefined;
-
- while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) {
- if ( elem.nodeType === 1 ) {
- if ( truncate && jQuery( elem ).is( until ) ) {
- break;
- }
- matched.push( elem );
- }
- }
- return matched;
-};
-
-
-var siblings = function( n, elem ) {
- var matched = [];
-
- for ( ; n; n = n.nextSibling ) {
- if ( n.nodeType === 1 && n !== elem ) {
- matched.push( n );
- }
- }
-
- return matched;
-};
-
-
-var rneedsContext = jQuery.expr.match.needsContext;
-
-
-
-function nodeName( elem, name ) {
-
- return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
-
-};
-var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i );
-
-
-
-var risSimple = /^.[^:#\[\.,]*$/;
-
-// Implement the identical functionality for filter and not
-function winnow( elements, qualifier, not ) {
- if ( jQuery.isFunction( qualifier ) ) {
- return jQuery.grep( elements, function( elem, i ) {
- return !!qualifier.call( elem, i, elem ) !== not;
- } );
- }
-
- // Single element
- if ( qualifier.nodeType ) {
- return jQuery.grep( elements, function( elem ) {
- return ( elem === qualifier ) !== not;
- } );
- }
-
- // Arraylike of elements (jQuery, arguments, Array)
- if ( typeof qualifier !== "string" ) {
- return jQuery.grep( elements, function( elem ) {
- return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
- } );
- }
-
- // Simple selector that can be filtered directly, removing non-Elements
- if ( risSimple.test( qualifier ) ) {
- return jQuery.filter( qualifier, elements, not );
- }
-
- // Complex selector, compare the two sets, removing non-Elements
- qualifier = jQuery.filter( qualifier, elements );
- return jQuery.grep( elements, function( elem ) {
- return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1;
- } );
-}
-
-jQuery.filter = function( expr, elems, not ) {
- var elem = elems[ 0 ];
-
- if ( not ) {
- expr = ":not(" + expr + ")";
- }
-
- if ( elems.length === 1 && elem.nodeType === 1 ) {
- return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
- }
-
- return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
- return elem.nodeType === 1;
- } ) );
-};
-
-jQuery.fn.extend( {
- find: function( selector ) {
- var i, ret,
- len = this.length,
- self = this;
-
- if ( typeof selector !== "string" ) {
- return this.pushStack( jQuery( selector ).filter( function() {
- for ( i = 0; i < len; i++ ) {
- if ( jQuery.contains( self[ i ], this ) ) {
- return true;
- }
- }
- } ) );
- }
-
- ret = this.pushStack( [] );
-
- for ( i = 0; i < len; i++ ) {
- jQuery.find( selector, self[ i ], ret );
- }
-
- return len > 1 ? jQuery.uniqueSort( ret ) : ret;
- },
- filter: function( selector ) {
- return this.pushStack( winnow( this, selector || [], false ) );
- },
- not: function( selector ) {
- return this.pushStack( winnow( this, selector || [], true ) );
- },
- is: function( selector ) {
- return !!winnow(
- this,
-
- // If this is a positional/relative selector, check membership in the returned set
- // so $("p:first").is("p:last") won't return true for a doc with two "p".
- typeof selector === "string" && rneedsContext.test( selector ) ?
- jQuery( selector ) :
- selector || [],
- false
- ).length;
- }
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
- // A simple way to check for HTML strings
- // Prioritize #id over to avoid XSS via location.hash (#9521)
- // Strict HTML recognition (#11290: must start with <)
- // Shortcut simple #id case for speed
- rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
- init = jQuery.fn.init = function( selector, context, root ) {
- var match, elem;
-
- // HANDLE: $(""), $(null), $(undefined), $(false)
- if ( !selector ) {
- return this;
- }
-
- // Method init() accepts an alternate rootjQuery
- // so migrate can support jQuery.sub (gh-2101)
- root = root || rootjQuery;
-
- // Handle HTML strings
- if ( typeof selector === "string" ) {
- if ( selector[ 0 ] === "<" &&
- selector[ selector.length - 1 ] === ">" &&
- selector.length >= 3 ) {
-
- // Assume that strings that start and end with <> are HTML and skip the regex check
- match = [ null, selector, null ];
-
- } else {
- match = rquickExpr.exec( selector );
- }
-
- // Match html or make sure no context is specified for #id
- if ( match && ( match[ 1 ] || !context ) ) {
-
- // HANDLE: $(html) -> $(array)
- if ( match[ 1 ] ) {
- context = context instanceof jQuery ? context[ 0 ] : context;
-
- // Option to run scripts is true for back-compat
- // Intentionally let the error be thrown if parseHTML is not present
- jQuery.merge( this, jQuery.parseHTML(
- match[ 1 ],
- context && context.nodeType ? context.ownerDocument || context : document,
- true
- ) );
-
- // HANDLE: $(html, props)
- if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
- for ( match in context ) {
-
- // Properties of context are called as methods if possible
- if ( jQuery.isFunction( this[ match ] ) ) {
- this[ match ]( context[ match ] );
-
- // ...and otherwise set as attributes
- } else {
- this.attr( match, context[ match ] );
- }
- }
- }
-
- return this;
-
- // HANDLE: $(#id)
- } else {
- elem = document.getElementById( match[ 2 ] );
-
- if ( elem ) {
-
- // Inject the element directly into the jQuery object
- this[ 0 ] = elem;
- this.length = 1;
- }
- return this;
- }
-
- // HANDLE: $(expr, $(...))
- } else if ( !context || context.jquery ) {
- return ( context || root ).find( selector );
-
- // HANDLE: $(expr, context)
- // (which is just equivalent to: $(context).find(expr)
- } else {
- return this.constructor( context ).find( selector );
- }
-
- // HANDLE: $(DOMElement)
- } else if ( selector.nodeType ) {
- this[ 0 ] = selector;
- this.length = 1;
- return this;
-
- // HANDLE: $(function)
- // Shortcut for document ready
- } else if ( jQuery.isFunction( selector ) ) {
- return root.ready !== undefined ?
- root.ready( selector ) :
-
- // Execute immediately if ready is not present
- selector( jQuery );
- }
-
- return jQuery.makeArray( selector, this );
- };
-
-// Give the init function the jQuery prototype for later instantiation
-init.prototype = jQuery.fn;
-
-// Initialize central reference
-rootjQuery = jQuery( document );
-
-
-var rparentsprev = /^(?:parents|prev(?:Until|All))/,
-
- // Methods guaranteed to produce a unique set when starting from a unique set
- guaranteedUnique = {
- children: true,
- contents: true,
- next: true,
- prev: true
- };
-
-jQuery.fn.extend( {
- has: function( target ) {
- var targets = jQuery( target, this ),
- l = targets.length;
-
- return this.filter( function() {
- var i = 0;
- for ( ; i < l; i++ ) {
- if ( jQuery.contains( this, targets[ i ] ) ) {
- return true;
- }
- }
- } );
- },
-
- closest: function( selectors, context ) {
- var cur,
- i = 0,
- l = this.length,
- matched = [],
- targets = typeof selectors !== "string" && jQuery( selectors );
-
- // Positional selectors never match, since there's no _selection_ context
- if ( !rneedsContext.test( selectors ) ) {
- for ( ; i < l; i++ ) {
- for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) {
-
- // Always skip document fragments
- if ( cur.nodeType < 11 && ( targets ?
- targets.index( cur ) > -1 :
-
- // Don't pass non-elements to Sizzle
- cur.nodeType === 1 &&
- jQuery.find.matchesSelector( cur, selectors ) ) ) {
-
- matched.push( cur );
- break;
- }
- }
- }
- }
-
- return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched );
- },
-
- // Determine the position of an element within the set
- index: function( elem ) {
-
- // No argument, return index in parent
- if ( !elem ) {
- return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1;
- }
-
- // Index in selector
- if ( typeof elem === "string" ) {
- return indexOf.call( jQuery( elem ), this[ 0 ] );
- }
-
- // Locate the position of the desired element
- return indexOf.call( this,
-
- // If it receives a jQuery object, the first element is used
- elem.jquery ? elem[ 0 ] : elem
- );
- },
-
- add: function( selector, context ) {
- return this.pushStack(
- jQuery.uniqueSort(
- jQuery.merge( this.get(), jQuery( selector, context ) )
- )
- );
- },
-
- addBack: function( selector ) {
- return this.add( selector == null ?
- this.prevObject : this.prevObject.filter( selector )
- );
- }
-} );
-
-function sibling( cur, dir ) {
- while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {}
- return cur;
-}
-
-jQuery.each( {
- parent: function( elem ) {
- var parent = elem.parentNode;
- return parent && parent.nodeType !== 11 ? parent : null;
- },
- parents: function( elem ) {
- return dir( elem, "parentNode" );
- },
- parentsUntil: function( elem, i, until ) {
- return dir( elem, "parentNode", until );
- },
- next: function( elem ) {
- return sibling( elem, "nextSibling" );
- },
- prev: function( elem ) {
- return sibling( elem, "previousSibling" );
- },
- nextAll: function( elem ) {
- return dir( elem, "nextSibling" );
- },
- prevAll: function( elem ) {
- return dir( elem, "previousSibling" );
- },
- nextUntil: function( elem, i, until ) {
- return dir( elem, "nextSibling", until );
- },
- prevUntil: function( elem, i, until ) {
- return dir( elem, "previousSibling", until );
- },
- siblings: function( elem ) {
- return siblings( ( elem.parentNode || {} ).firstChild, elem );
- },
- children: function( elem ) {
- return siblings( elem.firstChild );
- },
- contents: function( elem ) {
- if ( nodeName( elem, "iframe" ) ) {
- return elem.contentDocument;
- }
-
- // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
- // Treat the template element as a regular one in browsers that
- // don't support it.
- if ( nodeName( elem, "template" ) ) {
- elem = elem.content || elem;
- }
-
- return jQuery.merge( [], elem.childNodes );
- }
-}, function( name, fn ) {
- jQuery.fn[ name ] = function( until, selector ) {
- var matched = jQuery.map( this, fn, until );
-
- if ( name.slice( -5 ) !== "Until" ) {
- selector = until;
- }
-
- if ( selector && typeof selector === "string" ) {
- matched = jQuery.filter( selector, matched );
- }
-
- if ( this.length > 1 ) {
-
- // Remove duplicates
- if ( !guaranteedUnique[ name ] ) {
- jQuery.uniqueSort( matched );
- }
-
- // Reverse order for parents* and prev-derivatives
- if ( rparentsprev.test( name ) ) {
- matched.reverse();
- }
- }
-
- return this.pushStack( matched );
- };
-} );
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
- var object = {};
- jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
- object[ flag ] = true;
- } );
- return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- * options: an optional list of space-separated options that will change how
- * the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- * once: will ensure the callback list can only be fired once (like a Deferred)
- *
- * memory: will keep track of previous values and will call any callback added
- * after the list has been fired right away with the latest "memorized"
- * values (like a Deferred)
- *
- * unique: will ensure a callback can only be added once (no duplicate in the list)
- *
- * stopOnFalse: interrupt callings when a callback returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
- // Convert options from String-formatted to Object-formatted if needed
- // (we check in cache first)
- options = typeof options === "string" ?
- createOptions( options ) :
- jQuery.extend( {}, options );
-
- var // Flag to know if list is currently firing
- firing,
-
- // Last fire value for non-forgettable lists
- memory,
-
- // Flag to know if list was already fired
- fired,
-
- // Flag to prevent firing
- locked,
-
- // Actual callback list
- list = [],
-
- // Queue of execution data for repeatable lists
- queue = [],
-
- // Index of currently firing callback (modified by add/remove as needed)
- firingIndex = -1,
-
- // Fire callbacks
- fire = function() {
-
- // Enforce single-firing
- locked = locked || options.once;
-
- // Execute callbacks for all pending executions,
- // respecting firingIndex overrides and runtime changes
- fired = firing = true;
- for ( ; queue.length; firingIndex = -1 ) {
- memory = queue.shift();
- while ( ++firingIndex < list.length ) {
-
- // Run callback and check for early termination
- if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false &&
- options.stopOnFalse ) {
-
- // Jump to end and forget the data so .add doesn't re-fire
- firingIndex = list.length;
- memory = false;
- }
- }
- }
-
- // Forget the data if we're done with it
- if ( !options.memory ) {
- memory = false;
- }
-
- firing = false;
-
- // Clean up if we're done firing for good
- if ( locked ) {
-
- // Keep an empty list if we have data for future add calls
- if ( memory ) {
- list = [];
-
- // Otherwise, this object is spent
- } else {
- list = "";
- }
- }
- },
-
- // Actual Callbacks object
- self = {
-
- // Add a callback or a collection of callbacks to the list
- add: function() {
- if ( list ) {
-
- // If we have memory from a past run, we should fire after adding
- if ( memory && !firing ) {
- firingIndex = list.length - 1;
- queue.push( memory );
- }
-
- ( function add( args ) {
- jQuery.each( args, function( _, arg ) {
- if ( jQuery.isFunction( arg ) ) {
- if ( !options.unique || !self.has( arg ) ) {
- list.push( arg );
- }
- } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) {
-
- // Inspect recursively
- add( arg );
- }
- } );
- } )( arguments );
-
- if ( memory && !firing ) {
- fire();
- }
- }
- return this;
- },
-
- // Remove a callback from the list
- remove: function() {
- jQuery.each( arguments, function( _, arg ) {
- var index;
- while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
- list.splice( index, 1 );
-
- // Handle firing indexes
- if ( index <= firingIndex ) {
- firingIndex--;
- }
- }
- } );
- return this;
- },
-
- // Check if a given callback is in the list.
- // If no argument is given, return whether or not list has callbacks attached.
- has: function( fn ) {
- return fn ?
- jQuery.inArray( fn, list ) > -1 :
- list.length > 0;
- },
-
- // Remove all callbacks from the list
- empty: function() {
- if ( list ) {
- list = [];
- }
- return this;
- },
-
- // Disable .fire and .add
- // Abort any current/pending executions
- // Clear all callbacks and values
- disable: function() {
- locked = queue = [];
- list = memory = "";
- return this;
- },
- disabled: function() {
- return !list;
- },
-
- // Disable .fire
- // Also disable .add unless we have memory (since it would have no effect)
- // Abort any pending executions
- lock: function() {
- locked = queue = [];
- if ( !memory && !firing ) {
- list = memory = "";
- }
- return this;
- },
- locked: function() {
- return !!locked;
- },
-
- // Call all callbacks with the given context and arguments
- fireWith: function( context, args ) {
- if ( !locked ) {
- args = args || [];
- args = [ context, args.slice ? args.slice() : args ];
- queue.push( args );
- if ( !firing ) {
- fire();
- }
- }
- return this;
- },
-
- // Call all the callbacks with the given arguments
- fire: function() {
- self.fireWith( this, arguments );
- return this;
- },
-
- // To know if the callbacks have already been called at least once
- fired: function() {
- return !!fired;
- }
- };
-
- return self;
-};
-
-
-function Identity( v ) {
- return v;
-}
-function Thrower( ex ) {
- throw ex;
-}
-
-function adoptValue( value, resolve, reject, noValue ) {
- var method;
-
- try {
-
- // Check for promise aspect first to privilege synchronous behavior
- if ( value && jQuery.isFunction( ( method = value.promise ) ) ) {
- method.call( value ).done( resolve ).fail( reject );
-
- // Other thenables
- } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) {
- method.call( value, resolve, reject );
-
- // Other non-thenables
- } else {
-
- // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer:
- // * false: [ value ].slice( 0 ) => resolve( value )
- // * true: [ value ].slice( 1 ) => resolve()
- resolve.apply( undefined, [ value ].slice( noValue ) );
- }
-
- // For Promises/A+, convert exceptions into rejections
- // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in
- // Deferred#then to conditionally suppress rejection.
- } catch ( value ) {
-
- // Support: Android 4.0 only
- // Strict mode functions invoked without .call/.apply get global-object context
- reject.apply( undefined, [ value ] );
- }
-}
-
-jQuery.extend( {
-
- Deferred: function( func ) {
- var tuples = [
-
- // action, add listener, callbacks,
- // ... .then handlers, argument index, [final state]
- [ "notify", "progress", jQuery.Callbacks( "memory" ),
- jQuery.Callbacks( "memory" ), 2 ],
- [ "resolve", "done", jQuery.Callbacks( "once memory" ),
- jQuery.Callbacks( "once memory" ), 0, "resolved" ],
- [ "reject", "fail", jQuery.Callbacks( "once memory" ),
- jQuery.Callbacks( "once memory" ), 1, "rejected" ]
- ],
- state = "pending",
- promise = {
- state: function() {
- return state;
- },
- always: function() {
- deferred.done( arguments ).fail( arguments );
- return this;
- },
- "catch": function( fn ) {
- return promise.then( null, fn );
- },
-
- // Keep pipe for back-compat
- pipe: function( /* fnDone, fnFail, fnProgress */ ) {
- var fns = arguments;
-
- return jQuery.Deferred( function( newDefer ) {
- jQuery.each( tuples, function( i, tuple ) {
-
- // Map tuples (progress, done, fail) to arguments (done, fail, progress)
- var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ];
-
- // deferred.progress(function() { bind to newDefer or newDefer.notify })
- // deferred.done(function() { bind to newDefer or newDefer.resolve })
- // deferred.fail(function() { bind to newDefer or newDefer.reject })
- deferred[ tuple[ 1 ] ]( function() {
- var returned = fn && fn.apply( this, arguments );
- if ( returned && jQuery.isFunction( returned.promise ) ) {
- returned.promise()
- .progress( newDefer.notify )
- .done( newDefer.resolve )
- .fail( newDefer.reject );
- } else {
- newDefer[ tuple[ 0 ] + "With" ](
- this,
- fn ? [ returned ] : arguments
- );
- }
- } );
- } );
- fns = null;
- } ).promise();
- },
- then: function( onFulfilled, onRejected, onProgress ) {
- var maxDepth = 0;
- function resolve( depth, deferred, handler, special ) {
- return function() {
- var that = this,
- args = arguments,
- mightThrow = function() {
- var returned, then;
-
- // Support: Promises/A+ section 2.3.3.3.3
- // https://promisesaplus.com/#point-59
- // Ignore double-resolution attempts
- if ( depth < maxDepth ) {
- return;
- }
-
- returned = handler.apply( that, args );
-
- // Support: Promises/A+ section 2.3.1
- // https://promisesaplus.com/#point-48
- if ( returned === deferred.promise() ) {
- throw new TypeError( "Thenable self-resolution" );
- }
-
- // Support: Promises/A+ sections 2.3.3.1, 3.5
- // https://promisesaplus.com/#point-54
- // https://promisesaplus.com/#point-75
- // Retrieve `then` only once
- then = returned &&
-
- // Support: Promises/A+ section 2.3.4
- // https://promisesaplus.com/#point-64
- // Only check objects and functions for thenability
- ( typeof returned === "object" ||
- typeof returned === "function" ) &&
- returned.then;
-
- // Handle a returned thenable
- if ( jQuery.isFunction( then ) ) {
-
- // Special processors (notify) just wait for resolution
- if ( special ) {
- then.call(
- returned,
- resolve( maxDepth, deferred, Identity, special ),
- resolve( maxDepth, deferred, Thrower, special )
- );
-
- // Normal processors (resolve) also hook into progress
- } else {
-
- // ...and disregard older resolution values
- maxDepth++;
-
- then.call(
- returned,
- resolve( maxDepth, deferred, Identity, special ),
- resolve( maxDepth, deferred, Thrower, special ),
- resolve( maxDepth, deferred, Identity,
- deferred.notifyWith )
- );
- }
-
- // Handle all other returned values
- } else {
-
- // Only substitute handlers pass on context
- // and multiple values (non-spec behavior)
- if ( handler !== Identity ) {
- that = undefined;
- args = [ returned ];
- }
-
- // Process the value(s)
- // Default process is resolve
- ( special || deferred.resolveWith )( that, args );
- }
- },
-
- // Only normal processors (resolve) catch and reject exceptions
- process = special ?
- mightThrow :
- function() {
- try {
- mightThrow();
- } catch ( e ) {
-
- if ( jQuery.Deferred.exceptionHook ) {
- jQuery.Deferred.exceptionHook( e,
- process.stackTrace );
- }
-
- // Support: Promises/A+ section 2.3.3.3.4.1
- // https://promisesaplus.com/#point-61
- // Ignore post-resolution exceptions
- if ( depth + 1 >= maxDepth ) {
-
- // Only substitute handlers pass on context
- // and multiple values (non-spec behavior)
- if ( handler !== Thrower ) {
- that = undefined;
- args = [ e ];
- }
-
- deferred.rejectWith( that, args );
- }
- }
- };
-
- // Support: Promises/A+ section 2.3.3.3.1
- // https://promisesaplus.com/#point-57
- // Re-resolve promises immediately to dodge false rejection from
- // subsequent errors
- if ( depth ) {
- process();
- } else {
-
- // Call an optional hook to record the stack, in case of exception
- // since it's otherwise lost when execution goes async
- if ( jQuery.Deferred.getStackHook ) {
- process.stackTrace = jQuery.Deferred.getStackHook();
- }
- window.setTimeout( process );
- }
- };
- }
-
- return jQuery.Deferred( function( newDefer ) {
-
- // progress_handlers.add( ... )
- tuples[ 0 ][ 3 ].add(
- resolve(
- 0,
- newDefer,
- jQuery.isFunction( onProgress ) ?
- onProgress :
- Identity,
- newDefer.notifyWith
- )
- );
-
- // fulfilled_handlers.add( ... )
- tuples[ 1 ][ 3 ].add(
- resolve(
- 0,
- newDefer,
- jQuery.isFunction( onFulfilled ) ?
- onFulfilled :
- Identity
- )
- );
-
- // rejected_handlers.add( ... )
- tuples[ 2 ][ 3 ].add(
- resolve(
- 0,
- newDefer,
- jQuery.isFunction( onRejected ) ?
- onRejected :
- Thrower
- )
- );
- } ).promise();
- },
-
- // Get a promise for this deferred
- // If obj is provided, the promise aspect is added to the object
- promise: function( obj ) {
- return obj != null ? jQuery.extend( obj, promise ) : promise;
- }
- },
- deferred = {};
-
- // Add list-specific methods
- jQuery.each( tuples, function( i, tuple ) {
- var list = tuple[ 2 ],
- stateString = tuple[ 5 ];
-
- // promise.progress = list.add
- // promise.done = list.add
- // promise.fail = list.add
- promise[ tuple[ 1 ] ] = list.add;
-
- // Handle state
- if ( stateString ) {
- list.add(
- function() {
-
- // state = "resolved" (i.e., fulfilled)
- // state = "rejected"
- state = stateString;
- },
-
- // rejected_callbacks.disable
- // fulfilled_callbacks.disable
- tuples[ 3 - i ][ 2 ].disable,
-
- // progress_callbacks.lock
- tuples[ 0 ][ 2 ].lock
- );
- }
-
- // progress_handlers.fire
- // fulfilled_handlers.fire
- // rejected_handlers.fire
- list.add( tuple[ 3 ].fire );
-
- // deferred.notify = function() { deferred.notifyWith(...) }
- // deferred.resolve = function() { deferred.resolveWith(...) }
- // deferred.reject = function() { deferred.rejectWith(...) }
- deferred[ tuple[ 0 ] ] = function() {
- deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments );
- return this;
- };
-
- // deferred.notifyWith = list.fireWith
- // deferred.resolveWith = list.fireWith
- // deferred.rejectWith = list.fireWith
- deferred[ tuple[ 0 ] + "With" ] = list.fireWith;
- } );
-
- // Make the deferred a promise
- promise.promise( deferred );
-
- // Call given func if any
- if ( func ) {
- func.call( deferred, deferred );
- }
-
- // All done!
- return deferred;
- },
-
- // Deferred helper
- when: function( singleValue ) {
- var
-
- // count of uncompleted subordinates
- remaining = arguments.length,
-
- // count of unprocessed arguments
- i = remaining,
-
- // subordinate fulfillment data
- resolveContexts = Array( i ),
- resolveValues = slice.call( arguments ),
-
- // the master Deferred
- master = jQuery.Deferred(),
-
- // subordinate callback factory
- updateFunc = function( i ) {
- return function( value ) {
- resolveContexts[ i ] = this;
- resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value;
- if ( !( --remaining ) ) {
- master.resolveWith( resolveContexts, resolveValues );
- }
- };
- };
-
- // Single- and empty arguments are adopted like Promise.resolve
- if ( remaining <= 1 ) {
- adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject,
- !remaining );
-
- // Use .then() to unwrap secondary thenables (cf. gh-3000)
- if ( master.state() === "pending" ||
- jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) {
-
- return master.then();
- }
- }
-
- // Multiple arguments are aggregated like Promise.all array elements
- while ( i-- ) {
- adoptValue( resolveValues[ i ], updateFunc( i ), master.reject );
- }
-
- return master.promise();
- }
-} );
-
-
-// These usually indicate a programmer mistake during development,
-// warn about them ASAP rather than swallowing them by default.
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;
-
-jQuery.Deferred.exceptionHook = function( error, stack ) {
-
- // Support: IE 8 - 9 only
- // Console exists when dev tools are open, which can happen at any time
- if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) {
- window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack );
- }
-};
-
-
-
-
-jQuery.readyException = function( error ) {
- window.setTimeout( function() {
- throw error;
- } );
-};
-
-
-
-
-// The deferred used on DOM ready
-var readyList = jQuery.Deferred();
-
-jQuery.fn.ready = function( fn ) {
-
- readyList
- .then( fn )
-
- // Wrap jQuery.readyException in a function so that the lookup
- // happens at the time of error handling instead of callback
- // registration.
- .catch( function( error ) {
- jQuery.readyException( error );
- } );
-
- return this;
-};
-
-jQuery.extend( {
-
- // Is the DOM ready to be used? Set to true once it occurs.
- isReady: false,
-
- // A counter to track how many items to wait for before
- // the ready event fires. See #6781
- readyWait: 1,
-
- // Handle when the DOM is ready
- ready: function( wait ) {
-
- // Abort if there are pending holds or we're already ready
- if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
- return;
- }
-
- // Remember that the DOM is ready
- jQuery.isReady = true;
-
- // If a normal DOM Ready event fired, decrement, and wait if need be
- if ( wait !== true && --jQuery.readyWait > 0 ) {
- return;
- }
-
- // If there are functions bound, to execute
- readyList.resolveWith( document, [ jQuery ] );
- }
-} );
-
-jQuery.ready.then = readyList.then;
-
-// The ready event handler and self cleanup method
-function completed() {
- document.removeEventListener( "DOMContentLoaded", completed );
- window.removeEventListener( "load", completed );
- jQuery.ready();
-}
-
-// Catch cases where $(document).ready() is called
-// after the browser event has already occurred.
-// Support: IE <=9 - 10 only
-// Older IE sometimes signals "interactive" too soon
-if ( document.readyState === "complete" ||
- ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) {
-
- // Handle it asynchronously to allow scripts the opportunity to delay ready
- window.setTimeout( jQuery.ready );
-
-} else {
-
- // Use the handy event callback
- document.addEventListener( "DOMContentLoaded", completed );
-
- // A fallback to window.onload, that will always work
- window.addEventListener( "load", completed );
-}
-
-
-
-
-// Multifunctional method to get and set values of a collection
-// The value/s can optionally be executed if it's a function
-var access = function( elems, fn, key, value, chainable, emptyGet, raw ) {
- var i = 0,
- len = elems.length,
- bulk = key == null;
-
- // Sets many values
- if ( jQuery.type( key ) === "object" ) {
- chainable = true;
- for ( i in key ) {
- access( elems, fn, i, key[ i ], true, emptyGet, raw );
- }
-
- // Sets one value
- } else if ( value !== undefined ) {
- chainable = true;
-
- if ( !jQuery.isFunction( value ) ) {
- raw = true;
- }
-
- if ( bulk ) {
-
- // Bulk operations run against the entire set
- if ( raw ) {
- fn.call( elems, value );
- fn = null;
-
- // ...except when executing function values
- } else {
- bulk = fn;
- fn = function( elem, key, value ) {
- return bulk.call( jQuery( elem ), value );
- };
- }
- }
-
- if ( fn ) {
- for ( ; i < len; i++ ) {
- fn(
- elems[ i ], key, raw ?
- value :
- value.call( elems[ i ], i, fn( elems[ i ], key ) )
- );
- }
- }
- }
-
- if ( chainable ) {
- return elems;
- }
-
- // Gets
- if ( bulk ) {
- return fn.call( elems );
- }
-
- return len ? fn( elems[ 0 ], key ) : emptyGet;
-};
-var acceptData = function( owner ) {
-
- // Accepts only:
- // - Node
- // - Node.ELEMENT_NODE
- // - Node.DOCUMENT_NODE
- // - Object
- // - Any
- return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType );
-};
-
-
-
-
-function Data() {
- this.expando = jQuery.expando + Data.uid++;
-}
-
-Data.uid = 1;
-
-Data.prototype = {
-
- cache: function( owner ) {
-
- // Check if the owner object already has a cache
- var value = owner[ this.expando ];
-
- // If not, create one
- if ( !value ) {
- value = {};
-
- // We can accept data for non-element nodes in modern browsers,
- // but we should not, see #8335.
- // Always return an empty object.
- if ( acceptData( owner ) ) {
-
- // If it is a node unlikely to be stringify-ed or looped over
- // use plain assignment
- if ( owner.nodeType ) {
- owner[ this.expando ] = value;
-
- // Otherwise secure it in a non-enumerable property
- // configurable must be true to allow the property to be
- // deleted when data is removed
- } else {
- Object.defineProperty( owner, this.expando, {
- value: value,
- configurable: true
- } );
- }
- }
- }
-
- return value;
- },
- set: function( owner, data, value ) {
- var prop,
- cache = this.cache( owner );
-
- // Handle: [ owner, key, value ] args
- // Always use camelCase key (gh-2257)
- if ( typeof data === "string" ) {
- cache[ jQuery.camelCase( data ) ] = value;
-
- // Handle: [ owner, { properties } ] args
- } else {
-
- // Copy the properties one-by-one to the cache object
- for ( prop in data ) {
- cache[ jQuery.camelCase( prop ) ] = data[ prop ];
- }
- }
- return cache;
- },
- get: function( owner, key ) {
- return key === undefined ?
- this.cache( owner ) :
-
- // Always use camelCase key (gh-2257)
- owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ];
- },
- access: function( owner, key, value ) {
-
- // In cases where either:
- //
- // 1. No key was specified
- // 2. A string key was specified, but no value provided
- //
- // Take the "read" path and allow the get method to determine
- // which value to return, respectively either:
- //
- // 1. The entire cache object
- // 2. The data stored at the key
- //
- if ( key === undefined ||
- ( ( key && typeof key === "string" ) && value === undefined ) ) {
-
- return this.get( owner, key );
- }
-
- // When the key is not a string, or both a key and value
- // are specified, set or extend (existing objects) with either:
- //
- // 1. An object of properties
- // 2. A key and value
- //
- this.set( owner, key, value );
-
- // Since the "set" path can have two possible entry points
- // return the expected data based on which path was taken[*]
- return value !== undefined ? value : key;
- },
- remove: function( owner, key ) {
- var i,
- cache = owner[ this.expando ];
-
- if ( cache === undefined ) {
- return;
- }
-
- if ( key !== undefined ) {
-
- // Support array or space separated string of keys
- if ( Array.isArray( key ) ) {
-
- // If key is an array of keys...
- // We always set camelCase keys, so remove that.
- key = key.map( jQuery.camelCase );
- } else {
- key = jQuery.camelCase( key );
-
- // If a key with the spaces exists, use it.
- // Otherwise, create an array by matching non-whitespace
- key = key in cache ?
- [ key ] :
- ( key.match( rnothtmlwhite ) || [] );
- }
-
- i = key.length;
-
- while ( i-- ) {
- delete cache[ key[ i ] ];
- }
- }
-
- // Remove the expando if there's no more data
- if ( key === undefined || jQuery.isEmptyObject( cache ) ) {
-
- // Support: Chrome <=35 - 45
- // Webkit & Blink performance suffers when deleting properties
- // from DOM nodes, so set to undefined instead
- // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted)
- if ( owner.nodeType ) {
- owner[ this.expando ] = undefined;
- } else {
- delete owner[ this.expando ];
- }
- }
- },
- hasData: function( owner ) {
- var cache = owner[ this.expando ];
- return cache !== undefined && !jQuery.isEmptyObject( cache );
- }
-};
-var dataPriv = new Data();
-
-var dataUser = new Data();
-
-
-
-// Implementation Summary
-//
-// 1. Enforce API surface and semantic compatibility with 1.9.x branch
-// 2. Improve the module's maintainability by reducing the storage
-// paths to a single mechanism.
-// 3. Use the same single mechanism to support "private" and "user" data.
-// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData)
-// 5. Avoid exposing implementation details on user objects (eg. expando properties)
-// 6. Provide a clear path for implementation upgrade to WeakMap in 2014
-
-var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,
- rmultiDash = /[A-Z]/g;
-
-function getData( data ) {
- if ( data === "true" ) {
- return true;
- }
-
- if ( data === "false" ) {
- return false;
- }
-
- if ( data === "null" ) {
- return null;
- }
-
- // Only convert to a number if it doesn't change the string
- if ( data === +data + "" ) {
- return +data;
- }
-
- if ( rbrace.test( data ) ) {
- return JSON.parse( data );
- }
-
- return data;
-}
-
-function dataAttr( elem, key, data ) {
- var name;
-
- // If nothing was found internally, try to fetch any
- // data from the HTML5 data-* attribute
- if ( data === undefined && elem.nodeType === 1 ) {
- name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase();
- data = elem.getAttribute( name );
-
- if ( typeof data === "string" ) {
- try {
- data = getData( data );
- } catch ( e ) {}
-
- // Make sure we set the data so it isn't changed later
- dataUser.set( elem, key, data );
- } else {
- data = undefined;
- }
- }
- return data;
-}
-
-jQuery.extend( {
- hasData: function( elem ) {
- return dataUser.hasData( elem ) || dataPriv.hasData( elem );
- },
-
- data: function( elem, name, data ) {
- return dataUser.access( elem, name, data );
- },
-
- removeData: function( elem, name ) {
- dataUser.remove( elem, name );
- },
-
- // TODO: Now that all calls to _data and _removeData have been replaced
- // with direct calls to dataPriv methods, these can be deprecated.
- _data: function( elem, name, data ) {
- return dataPriv.access( elem, name, data );
- },
-
- _removeData: function( elem, name ) {
- dataPriv.remove( elem, name );
- }
-} );
-
-jQuery.fn.extend( {
- data: function( key, value ) {
- var i, name, data,
- elem = this[ 0 ],
- attrs = elem && elem.attributes;
-
- // Gets all values
- if ( key === undefined ) {
- if ( this.length ) {
- data = dataUser.get( elem );
-
- if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) {
- i = attrs.length;
- while ( i-- ) {
-
- // Support: IE 11 only
- // The attrs elements can be null (#14894)
- if ( attrs[ i ] ) {
- name = attrs[ i ].name;
- if ( name.indexOf( "data-" ) === 0 ) {
- name = jQuery.camelCase( name.slice( 5 ) );
- dataAttr( elem, name, data[ name ] );
- }
- }
- }
- dataPriv.set( elem, "hasDataAttrs", true );
- }
- }
-
- return data;
- }
-
- // Sets multiple values
- if ( typeof key === "object" ) {
- return this.each( function() {
- dataUser.set( this, key );
- } );
- }
-
- return access( this, function( value ) {
- var data;
-
- // The calling jQuery object (element matches) is not empty
- // (and therefore has an element appears at this[ 0 ]) and the
- // `value` parameter was not undefined. An empty jQuery object
- // will result in `undefined` for elem = this[ 0 ] which will
- // throw an exception if an attempt to read a data cache is made.
- if ( elem && value === undefined ) {
-
- // Attempt to get data from the cache
- // The key will always be camelCased in Data
- data = dataUser.get( elem, key );
- if ( data !== undefined ) {
- return data;
- }
-
- // Attempt to "discover" the data in
- // HTML5 custom data-* attrs
- data = dataAttr( elem, key );
- if ( data !== undefined ) {
- return data;
- }
-
- // We tried really hard, but the data doesn't exist.
- return;
- }
-
- // Set the data...
- this.each( function() {
-
- // We always store the camelCased key
- dataUser.set( this, key, value );
- } );
- }, null, value, arguments.length > 1, null, true );
- },
-
- removeData: function( key ) {
- return this.each( function() {
- dataUser.remove( this, key );
- } );
- }
-} );
-
-
-jQuery.extend( {
- queue: function( elem, type, data ) {
- var queue;
-
- if ( elem ) {
- type = ( type || "fx" ) + "queue";
- queue = dataPriv.get( elem, type );
-
- // Speed up dequeue by getting out quickly if this is just a lookup
- if ( data ) {
- if ( !queue || Array.isArray( data ) ) {
- queue = dataPriv.access( elem, type, jQuery.makeArray( data ) );
- } else {
- queue.push( data );
- }
- }
- return queue || [];
- }
- },
-
- dequeue: function( elem, type ) {
- type = type || "fx";
-
- var queue = jQuery.queue( elem, type ),
- startLength = queue.length,
- fn = queue.shift(),
- hooks = jQuery._queueHooks( elem, type ),
- next = function() {
- jQuery.dequeue( elem, type );
- };
-
- // If the fx queue is dequeued, always remove the progress sentinel
- if ( fn === "inprogress" ) {
- fn = queue.shift();
- startLength--;
- }
-
- if ( fn ) {
-
- // Add a progress sentinel to prevent the fx queue from being
- // automatically dequeued
- if ( type === "fx" ) {
- queue.unshift( "inprogress" );
- }
-
- // Clear up the last queue stop function
- delete hooks.stop;
- fn.call( elem, next, hooks );
- }
-
- if ( !startLength && hooks ) {
- hooks.empty.fire();
- }
- },
-
- // Not public - generate a queueHooks object, or return the current one
- _queueHooks: function( elem, type ) {
- var key = type + "queueHooks";
- return dataPriv.get( elem, key ) || dataPriv.access( elem, key, {
- empty: jQuery.Callbacks( "once memory" ).add( function() {
- dataPriv.remove( elem, [ type + "queue", key ] );
- } )
- } );
- }
-} );
-
-jQuery.fn.extend( {
- queue: function( type, data ) {
- var setter = 2;
-
- if ( typeof type !== "string" ) {
- data = type;
- type = "fx";
- setter--;
- }
-
- if ( arguments.length < setter ) {
- return jQuery.queue( this[ 0 ], type );
- }
-
- return data === undefined ?
- this :
- this.each( function() {
- var queue = jQuery.queue( this, type, data );
-
- // Ensure a hooks for this queue
- jQuery._queueHooks( this, type );
-
- if ( type === "fx" && queue[ 0 ] !== "inprogress" ) {
- jQuery.dequeue( this, type );
- }
- } );
- },
- dequeue: function( type ) {
- return this.each( function() {
- jQuery.dequeue( this, type );
- } );
- },
- clearQueue: function( type ) {
- return this.queue( type || "fx", [] );
- },
-
- // Get a promise resolved when queues of a certain type
- // are emptied (fx is the type by default)
- promise: function( type, obj ) {
- var tmp,
- count = 1,
- defer = jQuery.Deferred(),
- elements = this,
- i = this.length,
- resolve = function() {
- if ( !( --count ) ) {
- defer.resolveWith( elements, [ elements ] );
- }
- };
-
- if ( typeof type !== "string" ) {
- obj = type;
- type = undefined;
- }
- type = type || "fx";
-
- while ( i-- ) {
- tmp = dataPriv.get( elements[ i ], type + "queueHooks" );
- if ( tmp && tmp.empty ) {
- count++;
- tmp.empty.add( resolve );
- }
- }
- resolve();
- return defer.promise( obj );
- }
-} );
-var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source;
-
-var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" );
-
-
-var cssExpand = [ "Top", "Right", "Bottom", "Left" ];
-
-var isHiddenWithinTree = function( elem, el ) {
-
- // isHiddenWithinTree might be called from jQuery#filter function;
- // in that case, element will be second argument
- elem = el || elem;
-
- // Inline style trumps all
- return elem.style.display === "none" ||
- elem.style.display === "" &&
-
- // Otherwise, check computed style
- // Support: Firefox <=43 - 45
- // Disconnected elements can have computed display: none, so first confirm that elem is
- // in the document.
- jQuery.contains( elem.ownerDocument, elem ) &&
-
- jQuery.css( elem, "display" ) === "none";
- };
-
-var swap = function( elem, options, callback, args ) {
- var ret, name,
- old = {};
-
- // Remember the old values, and insert the new ones
- for ( name in options ) {
- old[ name ] = elem.style[ name ];
- elem.style[ name ] = options[ name ];
- }
-
- ret = callback.apply( elem, args || [] );
-
- // Revert the old values
- for ( name in options ) {
- elem.style[ name ] = old[ name ];
- }
-
- return ret;
-};
-
-
-
-
-function adjustCSS( elem, prop, valueParts, tween ) {
- var adjusted,
- scale = 1,
- maxIterations = 20,
- currentValue = tween ?
- function() {
- return tween.cur();
- } :
- function() {
- return jQuery.css( elem, prop, "" );
- },
- initial = currentValue(),
- unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ),
-
- // Starting value computation is required for potential unit mismatches
- initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) &&
- rcssNum.exec( jQuery.css( elem, prop ) );
-
- if ( initialInUnit && initialInUnit[ 3 ] !== unit ) {
-
- // Trust units reported by jQuery.css
- unit = unit || initialInUnit[ 3 ];
-
- // Make sure we update the tween properties later on
- valueParts = valueParts || [];
-
- // Iteratively approximate from a nonzero starting point
- initialInUnit = +initial || 1;
-
- do {
-
- // If previous iteration zeroed out, double until we get *something*.
- // Use string for doubling so we don't accidentally see scale as unchanged below
- scale = scale || ".5";
-
- // Adjust and apply
- initialInUnit = initialInUnit / scale;
- jQuery.style( elem, prop, initialInUnit + unit );
-
- // Update scale, tolerating zero or NaN from tween.cur()
- // Break the loop if scale is unchanged or perfect, or if we've just had enough.
- } while (
- scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations
- );
- }
-
- if ( valueParts ) {
- initialInUnit = +initialInUnit || +initial || 0;
-
- // Apply relative offset (+=/-=) if specified
- adjusted = valueParts[ 1 ] ?
- initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
- +valueParts[ 2 ];
- if ( tween ) {
- tween.unit = unit;
- tween.start = initialInUnit;
- tween.end = adjusted;
- }
- }
- return adjusted;
-}
-
-
-var defaultDisplayMap = {};
-
-function getDefaultDisplay( elem ) {
- var temp,
- doc = elem.ownerDocument,
- nodeName = elem.nodeName,
- display = defaultDisplayMap[ nodeName ];
-
- if ( display ) {
- return display;
- }
-
- temp = doc.body.appendChild( doc.createElement( nodeName ) );
- display = jQuery.css( temp, "display" );
-
- temp.parentNode.removeChild( temp );
-
- if ( display === "none" ) {
- display = "block";
- }
- defaultDisplayMap[ nodeName ] = display;
-
- return display;
-}
-
-function showHide( elements, show ) {
- var display, elem,
- values = [],
- index = 0,
- length = elements.length;
-
- // Determine new display value for elements that need to change
- for ( ; index < length; index++ ) {
- elem = elements[ index ];
- if ( !elem.style ) {
- continue;
- }
-
- display = elem.style.display;
- if ( show ) {
-
- // Since we force visibility upon cascade-hidden elements, an immediate (and slow)
- // check is required in this first loop unless we have a nonempty display value (either
- // inline or about-to-be-restored)
- if ( display === "none" ) {
- values[ index ] = dataPriv.get( elem, "display" ) || null;
- if ( !values[ index ] ) {
- elem.style.display = "";
- }
- }
- if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
- values[ index ] = getDefaultDisplay( elem );
- }
- } else {
- if ( display !== "none" ) {
- values[ index ] = "none";
-
- // Remember what we're overwriting
- dataPriv.set( elem, "display", display );
- }
- }
- }
-
- // Set the display of the elements in a second loop to avoid constant reflow
- for ( index = 0; index < length; index++ ) {
- if ( values[ index ] != null ) {
- elements[ index ].style.display = values[ index ];
- }
- }
-
- return elements;
-}
-
-jQuery.fn.extend( {
- show: function() {
- return showHide( this, true );
- },
- hide: function() {
- return showHide( this );
- },
- toggle: function( state ) {
- if ( typeof state === "boolean" ) {
- return state ? this.show() : this.hide();
- }
-
- return this.each( function() {
- if ( isHiddenWithinTree( this ) ) {
- jQuery( this ).show();
- } else {
- jQuery( this ).hide();
- }
- } );
- }
-} );
-var rcheckableType = ( /^(?:checkbox|radio)$/i );
-
-var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i );
-
-var rscriptType = ( /^$|\/(?:java|ecma)script/i );
-
-
-
-// We have to close these tags to support XHTML (#13200)
-var wrapMap = {
-
- // Support: IE <=9 only
- option: [ 1, "", " " ],
-
- // XHTML parsers do not magically insert elements in the
- // same way that tag soup parsers do. So we cannot shorten
- // this by omitting or other required elements.
- thead: [ 1, "" ],
- col: [ 2, "" ],
- tr: [ 2, "" ],
- td: [ 3, "" ],
-
- _default: [ 0, "", "" ]
-};
-
-// Support: IE <=9 only
-wrapMap.optgroup = wrapMap.option;
-
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-
-function getAll( context, tag ) {
-
- // Support: IE <=9 - 11 only
- // Use typeof to avoid zero-argument method invocation on host objects (#15151)
- var ret;
-
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- ret = context.getElementsByTagName( tag || "*" );
-
- } else if ( typeof context.querySelectorAll !== "undefined" ) {
- ret = context.querySelectorAll( tag || "*" );
-
- } else {
- ret = [];
- }
-
- if ( tag === undefined || tag && nodeName( context, tag ) ) {
- return jQuery.merge( [ context ], ret );
- }
-
- return ret;
-}
-
-
-// Mark scripts as having already been evaluated
-function setGlobalEval( elems, refElements ) {
- var i = 0,
- l = elems.length;
-
- for ( ; i < l; i++ ) {
- dataPriv.set(
- elems[ i ],
- "globalEval",
- !refElements || dataPriv.get( refElements[ i ], "globalEval" )
- );
- }
-}
-
-
-var rhtml = /<|?\w+;/;
-
-function buildFragment( elems, context, scripts, selection, ignored ) {
- var elem, tmp, tag, wrap, contains, j,
- fragment = context.createDocumentFragment(),
- nodes = [],
- i = 0,
- l = elems.length;
-
- for ( ; i < l; i++ ) {
- elem = elems[ i ];
-
- if ( elem || elem === 0 ) {
-
- // Add nodes directly
- if ( jQuery.type( elem ) === "object" ) {
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
-
- // Convert non-html into a text node
- } else if ( !rhtml.test( elem ) ) {
- nodes.push( context.createTextNode( elem ) );
-
- // Convert html into DOM nodes
- } else {
- tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
-
- // Deserialize a standard representation
- tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
- wrap = wrapMap[ tag ] || wrapMap._default;
- tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
-
- // Descend through wrappers to the right content
- j = wrap[ 0 ];
- while ( j-- ) {
- tmp = tmp.lastChild;
- }
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- jQuery.merge( nodes, tmp.childNodes );
-
- // Remember the top-level container
- tmp = fragment.firstChild;
-
- // Ensure the created nodes are orphaned (#12392)
- tmp.textContent = "";
- }
- }
- }
-
- // Remove wrapper from fragment
- fragment.textContent = "";
-
- i = 0;
- while ( ( elem = nodes[ i++ ] ) ) {
-
- // Skip elements already in the context collection (trac-4087)
- if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
- if ( ignored ) {
- ignored.push( elem );
- }
- continue;
- }
-
- contains = jQuery.contains( elem.ownerDocument, elem );
-
- // Append to fragment
- tmp = getAll( fragment.appendChild( elem ), "script" );
-
- // Preserve script evaluation history
- if ( contains ) {
- setGlobalEval( tmp );
- }
-
- // Capture executables
- if ( scripts ) {
- j = 0;
- while ( ( elem = tmp[ j++ ] ) ) {
- if ( rscriptType.test( elem.type || "" ) ) {
- scripts.push( elem );
- }
- }
- }
- }
-
- return fragment;
-}
-
-
-( function() {
- var fragment = document.createDocumentFragment(),
- div = fragment.appendChild( document.createElement( "div" ) ),
- input = document.createElement( "input" );
-
- // Support: Android 4.0 - 4.3 only
- // Check state lost if the name is set (#11217)
- // Support: Windows Web Apps (WWA)
- // `name` and `type` must use .setAttribute for WWA (#14901)
- input.setAttribute( "type", "radio" );
- input.setAttribute( "checked", "checked" );
- input.setAttribute( "name", "t" );
-
- div.appendChild( input );
-
- // Support: Android <=4.1 only
- // Older WebKit doesn't clone checked state correctly in fragments
- support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
- // Support: IE <=11 only
- // Make sure textarea (and checkbox) defaultValue is properly cloned
- div.innerHTML = "";
- support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
-} )();
-var documentElement = document.documentElement;
-
-
-
-var
- rkeyEvent = /^key/,
- rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
- rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
-
-function returnTrue() {
- return true;
-}
-
-function returnFalse() {
- return false;
-}
-
-// Support: IE <=9 only
-// See #13393 for more info
-function safeActiveElement() {
- try {
- return document.activeElement;
- } catch ( err ) { }
-}
-
-function on( elem, types, selector, data, fn, one ) {
- var origFn, type;
-
- // Types can be a map of types/handlers
- if ( typeof types === "object" ) {
-
- // ( types-Object, selector, data )
- if ( typeof selector !== "string" ) {
-
- // ( types-Object, data )
- data = data || selector;
- selector = undefined;
- }
- for ( type in types ) {
- on( elem, type, selector, data, types[ type ], one );
- }
- return elem;
- }
-
- if ( data == null && fn == null ) {
-
- // ( types, fn )
- fn = selector;
- data = selector = undefined;
- } else if ( fn == null ) {
- if ( typeof selector === "string" ) {
-
- // ( types, selector, fn )
- fn = data;
- data = undefined;
- } else {
-
- // ( types, data, fn )
- fn = data;
- data = selector;
- selector = undefined;
- }
- }
- if ( fn === false ) {
- fn = returnFalse;
- } else if ( !fn ) {
- return elem;
- }
-
- if ( one === 1 ) {
- origFn = fn;
- fn = function( event ) {
-
- // Can use an empty set, since event contains the info
- jQuery().off( event );
- return origFn.apply( this, arguments );
- };
-
- // Use same guid so caller can remove using origFn
- fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
- }
- return elem.each( function() {
- jQuery.event.add( this, types, fn, data, selector );
- } );
-}
-
-/*
- * Helper functions for managing events -- not part of the public interface.
- * Props to Dean Edwards' addEvent library for many of the ideas.
- */
-jQuery.event = {
-
- global: {},
-
- add: function( elem, types, handler, data, selector ) {
-
- var handleObjIn, eventHandle, tmp,
- events, t, handleObj,
- special, handlers, type, namespaces, origType,
- elemData = dataPriv.get( elem );
-
- // Don't attach events to noData or text/comment nodes (but allow plain objects)
- if ( !elemData ) {
- return;
- }
-
- // Caller can pass in an object of custom data in lieu of the handler
- if ( handler.handler ) {
- handleObjIn = handler;
- handler = handleObjIn.handler;
- selector = handleObjIn.selector;
- }
-
- // Ensure that invalid selectors throw exceptions at attach time
- // Evaluate against documentElement in case elem is a non-element node (e.g., document)
- if ( selector ) {
- jQuery.find.matchesSelector( documentElement, selector );
- }
-
- // Make sure that the handler has a unique ID, used to find/remove it later
- if ( !handler.guid ) {
- handler.guid = jQuery.guid++;
- }
-
- // Init the element's event structure and main handler, if this is the first
- if ( !( events = elemData.events ) ) {
- events = elemData.events = {};
- }
- if ( !( eventHandle = elemData.handle ) ) {
- eventHandle = elemData.handle = function( e ) {
-
- // Discard the second event of a jQuery.event.trigger() and
- // when an event is called after a page has unloaded
- return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ?
- jQuery.event.dispatch.apply( elem, arguments ) : undefined;
- };
- }
-
- // Handle multiple events separated by a space
- types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
- t = types.length;
- while ( t-- ) {
- tmp = rtypenamespace.exec( types[ t ] ) || [];
- type = origType = tmp[ 1 ];
- namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
-
- // There *must* be a type, no attaching namespace-only handlers
- if ( !type ) {
- continue;
- }
-
- // If event changes its type, use the special event handlers for the changed type
- special = jQuery.event.special[ type ] || {};
-
- // If selector defined, determine special event api type, otherwise given type
- type = ( selector ? special.delegateType : special.bindType ) || type;
-
- // Update special based on newly reset type
- special = jQuery.event.special[ type ] || {};
-
- // handleObj is passed to all event handlers
- handleObj = jQuery.extend( {
- type: type,
- origType: origType,
- data: data,
- handler: handler,
- guid: handler.guid,
- selector: selector,
- needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
- namespace: namespaces.join( "." )
- }, handleObjIn );
-
- // Init the event handler queue if we're the first
- if ( !( handlers = events[ type ] ) ) {
- handlers = events[ type ] = [];
- handlers.delegateCount = 0;
-
- // Only use addEventListener if the special events handler returns false
- if ( !special.setup ||
- special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
-
- if ( elem.addEventListener ) {
- elem.addEventListener( type, eventHandle );
- }
- }
- }
-
- if ( special.add ) {
- special.add.call( elem, handleObj );
-
- if ( !handleObj.handler.guid ) {
- handleObj.handler.guid = handler.guid;
- }
- }
-
- // Add to the element's handler list, delegates in front
- if ( selector ) {
- handlers.splice( handlers.delegateCount++, 0, handleObj );
- } else {
- handlers.push( handleObj );
- }
-
- // Keep track of which events have ever been used, for event optimization
- jQuery.event.global[ type ] = true;
- }
-
- },
-
- // Detach an event or set of events from an element
- remove: function( elem, types, handler, selector, mappedTypes ) {
-
- var j, origCount, tmp,
- events, t, handleObj,
- special, handlers, type, namespaces, origType,
- elemData = dataPriv.hasData( elem ) && dataPriv.get( elem );
-
- if ( !elemData || !( events = elemData.events ) ) {
- return;
- }
-
- // Once for each type.namespace in types; type may be omitted
- types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
- t = types.length;
- while ( t-- ) {
- tmp = rtypenamespace.exec( types[ t ] ) || [];
- type = origType = tmp[ 1 ];
- namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
-
- // Unbind all events (on this namespace, if provided) for the element
- if ( !type ) {
- for ( type in events ) {
- jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
- }
- continue;
- }
-
- special = jQuery.event.special[ type ] || {};
- type = ( selector ? special.delegateType : special.bindType ) || type;
- handlers = events[ type ] || [];
- tmp = tmp[ 2 ] &&
- new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" );
-
- // Remove matching events
- origCount = j = handlers.length;
- while ( j-- ) {
- handleObj = handlers[ j ];
-
- if ( ( mappedTypes || origType === handleObj.origType ) &&
- ( !handler || handler.guid === handleObj.guid ) &&
- ( !tmp || tmp.test( handleObj.namespace ) ) &&
- ( !selector || selector === handleObj.selector ||
- selector === "**" && handleObj.selector ) ) {
- handlers.splice( j, 1 );
-
- if ( handleObj.selector ) {
- handlers.delegateCount--;
- }
- if ( special.remove ) {
- special.remove.call( elem, handleObj );
- }
- }
- }
-
- // Remove generic event handler if we removed something and no more handlers exist
- // (avoids potential for endless recursion during removal of special event handlers)
- if ( origCount && !handlers.length ) {
- if ( !special.teardown ||
- special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
-
- jQuery.removeEvent( elem, type, elemData.handle );
- }
-
- delete events[ type ];
- }
- }
-
- // Remove data and the expando if it's no longer used
- if ( jQuery.isEmptyObject( events ) ) {
- dataPriv.remove( elem, "handle events" );
- }
- },
-
- dispatch: function( nativeEvent ) {
-
- // Make a writable jQuery.Event from the native event object
- var event = jQuery.event.fix( nativeEvent );
-
- var i, j, ret, matched, handleObj, handlerQueue,
- args = new Array( arguments.length ),
- handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [],
- special = jQuery.event.special[ event.type ] || {};
-
- // Use the fix-ed jQuery.Event rather than the (read-only) native event
- args[ 0 ] = event;
-
- for ( i = 1; i < arguments.length; i++ ) {
- args[ i ] = arguments[ i ];
- }
-
- event.delegateTarget = this;
-
- // Call the preDispatch hook for the mapped type, and let it bail if desired
- if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
- return;
- }
-
- // Determine handlers
- handlerQueue = jQuery.event.handlers.call( this, event, handlers );
-
- // Run delegates first; they may want to stop propagation beneath us
- i = 0;
- while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) {
- event.currentTarget = matched.elem;
-
- j = 0;
- while ( ( handleObj = matched.handlers[ j++ ] ) &&
- !event.isImmediatePropagationStopped() ) {
-
- // Triggered event must either 1) have no namespace, or 2) have namespace(s)
- // a subset or equal to those in the bound event (both can have no namespace).
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) {
-
- event.handleObj = handleObj;
- event.data = handleObj.data;
-
- ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
- handleObj.handler ).apply( matched.elem, args );
-
- if ( ret !== undefined ) {
- if ( ( event.result = ret ) === false ) {
- event.preventDefault();
- event.stopPropagation();
- }
- }
- }
- }
- }
-
- // Call the postDispatch hook for the mapped type
- if ( special.postDispatch ) {
- special.postDispatch.call( this, event );
- }
-
- return event.result;
- },
-
- handlers: function( event, handlers ) {
- var i, handleObj, sel, matchedHandlers, matchedSelectors,
- handlerQueue = [],
- delegateCount = handlers.delegateCount,
- cur = event.target;
-
- // Find delegate handlers
- if ( delegateCount &&
-
- // Support: IE <=9
- // Black-hole SVG instance trees (trac-13180)
- cur.nodeType &&
-
- // Support: Firefox <=42
- // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
- // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
- // Support: IE 11 only
- // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
- !( event.type === "click" && event.button >= 1 ) ) {
-
- for ( ; cur !== this; cur = cur.parentNode || this ) {
-
- // Don't check non-elements (#13208)
- // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
- if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
- matchedHandlers = [];
- matchedSelectors = {};
- for ( i = 0; i < delegateCount; i++ ) {
- handleObj = handlers[ i ];
-
- // Don't conflict with Object.prototype properties (#13203)
- sel = handleObj.selector + " ";
-
- if ( matchedSelectors[ sel ] === undefined ) {
- matchedSelectors[ sel ] = handleObj.needsContext ?
- jQuery( sel, this ).index( cur ) > -1 :
- jQuery.find( sel, this, null, [ cur ] ).length;
- }
- if ( matchedSelectors[ sel ] ) {
- matchedHandlers.push( handleObj );
- }
- }
- if ( matchedHandlers.length ) {
- handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
- }
- }
- }
- }
-
- // Add the remaining (directly-bound) handlers
- cur = this;
- if ( delegateCount < handlers.length ) {
- handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
- }
-
- return handlerQueue;
- },
-
- addProp: function( name, hook ) {
- Object.defineProperty( jQuery.Event.prototype, name, {
- enumerable: true,
- configurable: true,
-
- get: jQuery.isFunction( hook ) ?
- function() {
- if ( this.originalEvent ) {
- return hook( this.originalEvent );
- }
- } :
- function() {
- if ( this.originalEvent ) {
- return this.originalEvent[ name ];
- }
- },
-
- set: function( value ) {
- Object.defineProperty( this, name, {
- enumerable: true,
- configurable: true,
- writable: true,
- value: value
- } );
- }
- } );
- },
-
- fix: function( originalEvent ) {
- return originalEvent[ jQuery.expando ] ?
- originalEvent :
- new jQuery.Event( originalEvent );
- },
-
- special: {
- load: {
-
- // Prevent triggered image.load events from bubbling to window.load
- noBubble: true
- },
- focus: {
-
- // Fire native event if possible so blur/focus sequence is correct
- trigger: function() {
- if ( this !== safeActiveElement() && this.focus ) {
- this.focus();
- return false;
- }
- },
- delegateType: "focusin"
- },
- blur: {
- trigger: function() {
- if ( this === safeActiveElement() && this.blur ) {
- this.blur();
- return false;
- }
- },
- delegateType: "focusout"
- },
- click: {
-
- // For checkbox, fire native event so checked state will be right
- trigger: function() {
- if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) {
- this.click();
- return false;
- }
- },
-
- // For cross-browser consistency, don't fire native .click() on links
- _default: function( event ) {
- return nodeName( event.target, "a" );
- }
- },
-
- beforeunload: {
- postDispatch: function( event ) {
-
- // Support: Firefox 20+
- // Firefox doesn't alert if the returnValue field is not set.
- if ( event.result !== undefined && event.originalEvent ) {
- event.originalEvent.returnValue = event.result;
- }
- }
- }
- }
-};
-
-jQuery.removeEvent = function( elem, type, handle ) {
-
- // This "if" is needed for plain objects
- if ( elem.removeEventListener ) {
- elem.removeEventListener( type, handle );
- }
-};
-
-jQuery.Event = function( src, props ) {
-
- // Allow instantiation without the 'new' keyword
- if ( !( this instanceof jQuery.Event ) ) {
- return new jQuery.Event( src, props );
- }
-
- // Event object
- if ( src && src.type ) {
- this.originalEvent = src;
- this.type = src.type;
-
- // Events bubbling up the document may have been marked as prevented
- // by a handler lower down the tree; reflect the correct value.
- this.isDefaultPrevented = src.defaultPrevented ||
- src.defaultPrevented === undefined &&
-
- // Support: Android <=2.3 only
- src.returnValue === false ?
- returnTrue :
- returnFalse;
-
- // Create target properties
- // Support: Safari <=6 - 7 only
- // Target should not be a text node (#504, #13143)
- this.target = ( src.target && src.target.nodeType === 3 ) ?
- src.target.parentNode :
- src.target;
-
- this.currentTarget = src.currentTarget;
- this.relatedTarget = src.relatedTarget;
-
- // Event type
- } else {
- this.type = src;
- }
-
- // Put explicitly provided properties onto the event object
- if ( props ) {
- jQuery.extend( this, props );
- }
-
- // Create a timestamp if incoming event doesn't have one
- this.timeStamp = src && src.timeStamp || jQuery.now();
-
- // Mark it as fixed
- this[ jQuery.expando ] = true;
-};
-
-// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
-// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
-jQuery.Event.prototype = {
- constructor: jQuery.Event,
- isDefaultPrevented: returnFalse,
- isPropagationStopped: returnFalse,
- isImmediatePropagationStopped: returnFalse,
- isSimulated: false,
-
- preventDefault: function() {
- var e = this.originalEvent;
-
- this.isDefaultPrevented = returnTrue;
-
- if ( e && !this.isSimulated ) {
- e.preventDefault();
- }
- },
- stopPropagation: function() {
- var e = this.originalEvent;
-
- this.isPropagationStopped = returnTrue;
-
- if ( e && !this.isSimulated ) {
- e.stopPropagation();
- }
- },
- stopImmediatePropagation: function() {
- var e = this.originalEvent;
-
- this.isImmediatePropagationStopped = returnTrue;
-
- if ( e && !this.isSimulated ) {
- e.stopImmediatePropagation();
- }
-
- this.stopPropagation();
- }
-};
-
-// Includes all common event props including KeyEvent and MouseEvent specific props
-jQuery.each( {
- altKey: true,
- bubbles: true,
- cancelable: true,
- changedTouches: true,
- ctrlKey: true,
- detail: true,
- eventPhase: true,
- metaKey: true,
- pageX: true,
- pageY: true,
- shiftKey: true,
- view: true,
- "char": true,
- charCode: true,
- key: true,
- keyCode: true,
- button: true,
- buttons: true,
- clientX: true,
- clientY: true,
- offsetX: true,
- offsetY: true,
- pointerId: true,
- pointerType: true,
- screenX: true,
- screenY: true,
- targetTouches: true,
- toElement: true,
- touches: true,
-
- which: function( event ) {
- var button = event.button;
-
- // Add which for key events
- if ( event.which == null && rkeyEvent.test( event.type ) ) {
- return event.charCode != null ? event.charCode : event.keyCode;
- }
-
- // Add which for click: 1 === left; 2 === middle; 3 === right
- if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) {
- if ( button & 1 ) {
- return 1;
- }
-
- if ( button & 2 ) {
- return 3;
- }
-
- if ( button & 4 ) {
- return 2;
- }
-
- return 0;
- }
-
- return event.which;
- }
-}, jQuery.event.addProp );
-
-// Create mouseenter/leave events using mouseover/out and event-time checks
-// so that event delegation works in jQuery.
-// Do the same for pointerenter/pointerleave and pointerover/pointerout
-//
-// Support: Safari 7 only
-// Safari sends mouseenter too often; see:
-// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
-// for the description of the bug (it existed in older Chrome versions as well).
-jQuery.each( {
- mouseenter: "mouseover",
- mouseleave: "mouseout",
- pointerenter: "pointerover",
- pointerleave: "pointerout"
-}, function( orig, fix ) {
- jQuery.event.special[ orig ] = {
- delegateType: fix,
- bindType: fix,
-
- handle: function( event ) {
- var ret,
- target = this,
- related = event.relatedTarget,
- handleObj = event.handleObj;
-
- // For mouseenter/leave call the handler if related is outside the target.
- // NB: No relatedTarget if the mouse left/entered the browser window
- if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
- event.type = handleObj.origType;
- ret = handleObj.handler.apply( this, arguments );
- event.type = fix;
- }
- return ret;
- }
- };
-} );
-
-jQuery.fn.extend( {
-
- on: function( types, selector, data, fn ) {
- return on( this, types, selector, data, fn );
- },
- one: function( types, selector, data, fn ) {
- return on( this, types, selector, data, fn, 1 );
- },
- off: function( types, selector, fn ) {
- var handleObj, type;
- if ( types && types.preventDefault && types.handleObj ) {
-
- // ( event ) dispatched jQuery.Event
- handleObj = types.handleObj;
- jQuery( types.delegateTarget ).off(
- handleObj.namespace ?
- handleObj.origType + "." + handleObj.namespace :
- handleObj.origType,
- handleObj.selector,
- handleObj.handler
- );
- return this;
- }
- if ( typeof types === "object" ) {
-
- // ( types-object [, selector] )
- for ( type in types ) {
- this.off( type, selector, types[ type ] );
- }
- return this;
- }
- if ( selector === false || typeof selector === "function" ) {
-
- // ( types [, fn] )
- fn = selector;
- selector = undefined;
- }
- if ( fn === false ) {
- fn = returnFalse;
- }
- return this.each( function() {
- jQuery.event.remove( this, types, fn, selector );
- } );
- }
-} );
-
-
-var
-
- /* eslint-disable max-len */
-
- // See https://github.com/eslint/eslint/issues/3229
- rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
-
- /* eslint-enable */
-
- // Support: IE <=10 - 11, Edge 12 - 13
- // In IE/Edge using regex groups here causes severe slowdowns.
- // See https://connect.microsoft.com/IE/feedback/details/1736512/
- rnoInnerhtml = /
-
-
-
-
-
-
-
-
-
-
-
-{% endblock %}
diff --git a/advanced_source/ONNXLive.rst b/advanced_source/ONNXLive.rst
index 21380e43405..7177522c968 100644
--- a/advanced_source/ONNXLive.rst
+++ b/advanced_source/ONNXLive.rst
@@ -2,172 +2,11 @@
ONNX Live Tutorial
==================
-This tutorial will show you to convert a neural style transfer model that has been exported from PyTorch into the Apple CoreML format using ONNX. This will allow you to easily run deep learning models on Apple devices and, in this case, live stream from the camera.
+This tutorial has been deprecated.
-What is ONNX?
--------------
+Redirecting in 3 seconds...
-ONNX (Open Neural Network Exchange) is an open format to represent deep learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools and choose the combination that is best for them. ONNX is developed and supported by a community of partners. You can learn more about ONNX and what tools are supported by going to `onnx.ai `_.
-Tutorial Overview
------------------
+.. raw:: html
-This tutorial will walk you through 4 main steps:
-
-
-#. `Download (or train) PyTorch style transfer models`_
-#. `Convert the PyTorch models to ONNX models`_
-#. `Convert the ONNX models to CoreML models`_
-#. `Run the CoreML models in a style transfer iOS App`_
-
-Preparing the Environment
--------------------------
-
-We will be working in a virtualenv in order to avoid conflicts with your local packages.
-We are also using Python 3.6 for this tutorial, but other versions should work as well.
-
-.. code-block:: python
-
- python3.6 -m venv venv
- source ./venv/bin/activate
-
-
-You need to install pytorch and the onnx->coreml converter:
-
-.. code-block:: bash
-
- pip install torchvision onnx-coreml
-
-
-You will also need to install XCode if you want to run the iOS style transfer app on your iPhone.
-You can also convert models in Linux, however to run the iOS app itself, you will need a Mac.
-
-Download (or train) PyTorch style transfer models
--------------------------------------------------
-
-For this tutorial, we will use the style transfer models that are published with pytorch in https://github.com/pytorch/examples/tree/master/fast_neural_style .
-If you would like to use a different PyTorch or ONNX model, feel free to skip this step.
-
-These models are meant for applying style transfer on still images and really not optimized to be fast enough for video. However if we reduce the resolution low enough, they can also work well on videos.
-
-Let's download the models:
-
-.. code-block:: bash
-
- git clone https://github.com/pytorch/examples
- cd examples/fast_neural_style
-
-
-If you would like to train the models yourself, the pytorch/examples repository you just cloned has more information on how to do this.
-For now, we'll just download pre-trained models with the script provided by the repository:
-
-.. code-block:: bash
-
- python download_saved_models.py
-
-
-This script downloads the pre-trained PyTorch models and puts them into the ``saved_models`` folder.
-There should now be 4 files, ``candy.pth``\ , ``mosaic.pth``\ , ``rain_princess.pth`` and ``udnie.pth`` in your directory.
-
-Convert the PyTorch models to ONNX models
------------------------------------------
-
-Now that we have the pre-trained PyTorch models as ``.pth`` files in the ``saved_models`` folder, we will need to convert them to ONNX format.
-The model definition is in the pytorch/examples repository we cloned previously, and with a few lines of python we can export it to ONNX.
-In this case, instead of actually running the neural net, we will call ``torch.onnx._export``\ , which is provided with PyTorch as an api to directly export ONNX formatted models from PyTorch.
-However, in this case we don't even need to do that, because a script already exists ``neural_style/neural_style.py`` that will do this for us.
-You can also take a look at that script if you would like to apply it to other models.
-
-Exporting the ONNX format from PyTorch is essentially tracing your neural network so this api call will internally run the network on 'dummy data' in order to generate the graph.
-For this, it needs an input image to apply the style transfer to which can simply be a blank image.
-However, the pixel size of this image is important, as this will be the size for the exported style transfer model.
-To get good performance, we'll use a resolution of 250x540. Feel free to take a larger resolution if you care less about
-FPS and more about style transfer quality.
-
-Let's use `ImageMagick `_ to create a blank image of the resolution we want:
-
-.. code-block:: bash
-
- convert -size 250x540 xc:white png24:dummy.jpg
-
-
-and use that to export the PyTorch models:
-
-.. code-block:: bash
-
- python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/candy.pth --cuda 0 --export_onnx ./saved_models/candy.onnx
- python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/udnie.pth --cuda 0 --export_onnx ./saved_models/udnie.onnx
- python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/rain_princess.pth --cuda 0 --export_onnx ./saved_models/rain_princess.onnx
- python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/mosaic.pth --cuda 0 --export_onnx ./saved_models/mosaic.onnx
-
-
-You should end up with 4 files, ``candy.onnx``\ , ``mosaic.onnx``\ , ``rain_princess.onnx`` and ``udnie.onnx``\ ,
-created from the corresponding ``.pth`` files.
-
-Convert the ONNX models to CoreML models
-----------------------------------------
-
-Now that we have ONNX models, we can convert them to CoreML models in order to run them on Apple devices.
-For this, we use the onnx-coreml converter we installed previously.
-The converter comes with a ``convert-onnx-to-coreml`` script, which the installation steps above added to our path. Unfortunately that won't work for us as we need to mark the input and output of the network as an image
-and, while this is supported by the converter, it is only supported when calling the converter from python.
-
-Looking at the style transfer model (for example opening the .onnx file in an application like `Netron `_\ ),
-we see that the input is named '0' and the output is named '186'. These are just numeric ids assigned by PyTorch.
-We will need to mark these as images.
-
-So let's create a small python file and call it ``onnx_to_coreml.py``. This can be created by using the touch command and edited with your favorite editor to add the following lines of code.
-
-.. code-block:: python
-
- import sys
- from onnx import onnx_pb
- from onnx_coreml import convert
-
- model_in = sys.argv[1]
- model_out = sys.argv[2]
-
- model_file = open(model_in, 'rb')
- model_proto = onnx_pb.ModelProto()
- model_proto.ParseFromString(model_file.read())
- coreml_model = convert(model_proto, image_input_names=['0'], image_output_names=['186'])
- coreml_model.save(model_out)
-
-
-we now run it:
-
-.. code-block:: bash
-
- python onnx_to_coreml.py ./saved_models/candy.onnx ./saved_models/candy.mlmodel
- python onnx_to_coreml.py ./saved_models/udnie.onnx ./saved_models/udnie.mlmodel
- python onnx_to_coreml.py ./saved_models/rain_princess.onnx ./saved_models/rain_princess.mlmodel
- python onnx_to_coreml.py ./saved_models/mosaic.onnx ./saved_models/mosaic.mlmodel
-
-
-Now, there should be 4 CoreML models in your ``saved_models`` directory: ``candy.mlmodel``\ , ``mosaic.mlmodel``\ , ``rain_princess.mlmodel`` and ``udnie.mlmodel``.
-
-Run the CoreML models in a style transfer iOS App
--------------------------------------------------
-
-This repository (i.e. the one you're currently reading the README.md of) contains an iOS app able to run CoreML style transfer models on a live camera stream from your phone camera. Let's clone the repository:
-
-.. code-block:: bash
-
- git clone https://github.com/onnx/tutorials
-
-
-and open the ``tutorials/examples/CoreML/ONNXLive/ONNXLive.xcodeproj`` project in XCode.
-We recommend using XCode 9.3 and an iPhone X. There might be issues running on older devices or XCode versions.
-
-In the ``Models/`` folder, the project contains some .mlmodel files. We're going to replace them with the models we just created.
-
-You then run the app on your iPhone and you are all set. Tapping on the screen switches through the models.
-
-Conclusion
-----------
-
-We hope this tutorial gave you an overview of what ONNX is about and how you can use it to convert neural networks
-between frameworks, in this case neural style transfer models moving from PyTorch to CoreML.
-
-Feel free to experiment with these steps and test them on your own models.
-Please let us know if you hit any issues or want to give feedback. We'd like to hear what you think.
+
diff --git a/advanced_source/README.txt b/advanced_source/README.txt
index 50dacb8c362..56f01688089 100644
--- a/advanced_source/README.txt
+++ b/advanced_source/README.txt
@@ -8,11 +8,3 @@ Advanced Tutorials
2. numpy_extensions_tutorial.py
Creating Extensions Using numpy and scipy
https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html
-
-3. c_extension.rst
- Custom C Extensions for PyTorch
- https://pytorch.org/tutorials/advanced/c_extension.html
-
-4. super_resolution_with_caffe2.py
- Transfering a Model from PyTorch to Caffe2 and Mobile using ONNX
- https://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
diff --git a/advanced_source/coding_ddpg.py b/advanced_source/coding_ddpg.py
new file mode 100644
index 00000000000..90ea4565dab
--- /dev/null
+++ b/advanced_source/coding_ddpg.py
@@ -0,0 +1,1220 @@
+# -*- coding: utf-8 -*-
+"""
+TorchRL objectives: Coding a DDPG loss
+======================================
+**Author**: `Vincent Moens `_
+
+"""
+
+##############################################################################
+# Overview
+# --------
+#
+# TorchRL separates the training of RL algorithms in various pieces that will be
+# assembled in your training script: the environment, the data collection and
+# storage, the model and finally the loss function.
+#
+# TorchRL losses (or "objectives") are stateful objects that contain the
+# trainable parameters (policy and value models).
+# This tutorial will guide you through the steps to code a loss from the ground up
+# using TorchRL.
+#
+# To this aim, we will be focusing on DDPG, which is a relatively straightforward
+# algorithm to code.
+# `Deep Deterministic Policy Gradient `_ (DDPG)
+# is a simple continuous control algorithm. It consists in learning a
+# parametric value function for an action-observation pair, and
+# then learning a policy that outputs actions that maximize this value
+# function given a certain observation.
+#
+# What you will learn:
+#
+# - how to write a loss module and customize its value estimator;
+# - how to build an environment in TorchRL, including transforms
+# (for example, data normalization) and parallel execution;
+# - how to design a policy and value network;
+# - how to collect data from your environment efficiently and store them
+# in a replay buffer;
+# - how to store trajectories (and not transitions) in your replay buffer);
+# - how to evaluate your model.
+#
+# Prerequisites
+# ~~~~~~~~~~~~~
+#
+# This tutorial assumes that you have completed the
+# `PPO tutorial `_ which gives
+# an overview of the TorchRL components and dependencies, such as
+# :class:`tensordict.TensorDict` and :class:`tensordict.nn.TensorDictModules`,
+# although it should be
+# sufficiently transparent to be understood without a deep understanding of
+# these classes.
+#
+# .. note::
+# We do not aim at giving a SOTA implementation of the algorithm, but rather
+# to provide a high-level illustration of TorchRL's loss implementations
+# and the library features that are to be used in the context of
+# this algorithm.
+#
+# Imports and setup
+# -----------------
+#
+# .. code-block:: bash
+#
+# %%bash
+# pip3 install torchrl mujoco glfw
+
+# sphinx_gallery_start_ignore
+import warnings
+
+warnings.filterwarnings("ignore")
+from torch import multiprocessing
+
+# TorchRL prefers the spawn start method, which restricts creation of ``~torchrl.envs.ParallelEnv``
+# to the `__main__` method call; for ease of reading, the code here switches to fork,
+# which is also the default start method in Google's Colaboratory
+try:
+ multiprocessing.set_start_method("fork")
+except RuntimeError:
+ pass
+
+# sphinx_gallery_end_ignore
+
+
+import torch
+import tqdm
+
+
+###############################################################################
+# We will execute the policy on CUDA if available
+is_fork = multiprocessing.get_start_method() == "fork"
+device = (
+ torch.device(0)
+ if torch.cuda.is_available() and not is_fork
+ else torch.device("cpu")
+)
+collector_device = torch.device("cpu") # Change the device to ``cuda`` to use CUDA
+
+###############################################################################
+# TorchRL :class:`~torchrl.objectives.LossModule`
+# -----------------------------------------------
+#
+# TorchRL provides a series of losses to use in your training scripts.
+# The aim is to have losses that are easily reusable/swappable and that have
+# a simple signature.
+#
+# The main characteristics of TorchRL losses are:
+#
+# - They are stateful objects: they contain a copy of the trainable parameters
+# such that ``loss_module.parameters()`` gives whatever is needed to train the
+# algorithm.
+# - They follow the ``TensorDict`` convention: the :meth:`torch.nn.Module.forward`
+# method will receive a TensorDict as input that contains all the necessary
+# information to return a loss value.
+#
+# >>> data = replay_buffer.sample()
+# >>> loss_dict = loss_module(data)
+#
+# - They output a :class:`tensordict.TensorDict` instance with the loss values
+# written under a ``"loss_<smth>"`` key, where ``smth`` is a string describing the
+# loss. Additional keys in the ``TensorDict`` may be useful metrics to log during
+# training time.
+#
+# .. note::
+# The reason we return independent losses is to let the user use a different
+# optimizer for different sets of parameters for instance. Summing the losses
+# can be simply done via
+#
+# >>> loss_val = sum(loss for key, loss in loss_dict.items() if key.startswith("loss_"))
+#
+# The ``__init__`` method
+# ~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The parent class of all losses is :class:`~torchrl.objectives.LossModule`.
+# As many other components of the library, its :meth:`~torchrl.objectives.LossModule.forward` method expects
+# as input a :class:`tensordict.TensorDict` instance sampled from an experience
+# replay buffer, or any similar data structure. Using this format makes it
+# possible to re-use the module across
+# modalities, or in complex settings where the model needs to read multiple
+# entries for instance. In other words, it allows us to code a loss module that
+# is oblivious to the data type that is being given to it and that focuses on
+# running the elementary steps of the loss function and only those.
+#
+# To keep the tutorial as didactic as we can, we'll be displaying each method
+# of the class independently and we'll be populating the class at a later
+# stage.
+#
+# Let us start with the :meth:`~torchrl.objectives.LossModule.__init__`
+# method. DDPG aims at solving a control task with a simple strategy:
+# training a policy to output actions that maximize the value predicted by
+# a value network. Hence, our loss module needs to receive two networks in its
+# constructor: an actor and a value networks. We expect both of these to be
+# TensorDict-compatible objects, such as
+# :class:`tensordict.nn.TensorDictModule`.
+# Our loss function will need to compute a target value and fit the value
+# network to this, and generate an action and fit the policy such that its
+# value estimate is maximized.
+#
+# The crucial step of the :meth:`LossModule.__init__` method is the call to
+# :meth:`~torchrl.LossModule.convert_to_functional`. This method will extract
+# the parameters from the module and convert it to a functional module.
+# Strictly speaking, this is not necessary and one may perfectly code all
+# the losses without it. However, we encourage its usage for the following
+# reason.
+#
+# The reason TorchRL does this is that RL algorithms often execute the same
+# model with different sets of parameters, called "trainable" and "target"
+# parameters.
+# The "trainable" parameters are those that the optimizer needs to fit. The
+# "target" parameters are usually a copy of the former's with some time lag
+# (absolute or diluted through a moving average).
+# These target parameters are used to compute the value associated with the
+# next observation. One of the advantages of using a set of target parameters
+# for the value model that do not match exactly the current configuration is
+# that they provide a pessimistic bound on the value function being computed.
+# Pay attention to the ``create_target_params`` keyword argument below: this
+# argument tells the :meth:`~torchrl.objectives.LossModule.convert_to_functional`
+# method to create a set of target parameters in the loss module to be used
+# for target value computation. If this is set to ``False`` (see the actor network
+# for instance) the ``target_actor_network_params`` attribute will still be
+# accessible but this will just return a **detached** version of the
+# actor parameters.
+#
+# Later, we will see how the target parameters should be updated in TorchRL.
+#
+
+from tensordict.nn import TensorDictModule, TensorDictSequential
+
+
+def _init(
+ self,
+ actor_network: TensorDictModule,
+ value_network: TensorDictModule,
+) -> None:
+ super(type(self), self).__init__()
+
+ self.convert_to_functional(
+ actor_network,
+ "actor_network",
+ create_target_params=True,
+ )
+ self.convert_to_functional(
+ value_network,
+ "value_network",
+ create_target_params=True,
+ compare_against=list(actor_network.parameters()),
+ )
+
+ self.actor_in_keys = actor_network.in_keys
+
+ # Since the value we'll be using is based on the actor and value network,
+ # we put them together in a single actor-critic container.
+ actor_critic = ActorCriticWrapper(actor_network, value_network)
+ self.actor_critic = actor_critic
+ self.loss_function = "l2"
+
+
+###############################################################################
+# The value estimator loss method
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# In many RL algorithms, the value network (or Q-value network) is trained based
+# on an empirical value estimate. This can be bootstrapped (TD(0), low
+# variance, high bias), meaning
+# that the target value is obtained using the next reward and nothing else, or
+# a Monte-Carlo estimate can be obtained (TD(1)) in which case the whole
+# sequence of upcoming rewards will be used (high variance, low bias). An
+# intermediate estimator (TD(:math:`\lambda`)) can also be used to compromise
+# bias and variance.
+# TorchRL makes it easy to use one or the other estimator via the
+# :class:`~torchrl.objectives.utils.ValueEstimators` Enum class, which contains
+# pointers to all the value estimators implemented. Let us define the default
+# value function here. We will take the simplest version (TD(0)), and show later
+# on how this can be changed.
+
+from torchrl.objectives.utils import ValueEstimators
+
+default_value_estimator = ValueEstimators.TD0
+
+###############################################################################
+# We also need to give some instructions to DDPG on how to build the value
+# estimator, depending on the user query. Depending on the estimator provided,
+# we will build the corresponding module to be used at train time:
+
+from torchrl.objectives.utils import default_value_kwargs
+from torchrl.objectives.value import TD0Estimator, TD1Estimator, TDLambdaEstimator
+
+
+def make_value_estimator(self, value_type: ValueEstimators, **hyperparams):
+ hp = dict(default_value_kwargs(value_type))
+ if hasattr(self, "gamma"):
+ hp["gamma"] = self.gamma
+ hp.update(hyperparams)
+ value_key = "state_action_value"
+ if value_type == ValueEstimators.TD1:
+ self._value_estimator = TD1Estimator(value_network=self.actor_critic, **hp)
+ elif value_type == ValueEstimators.TD0:
+ self._value_estimator = TD0Estimator(value_network=self.actor_critic, **hp)
+ elif value_type == ValueEstimators.GAE:
+ raise NotImplementedError(
+ f"Value type {value_type} it not implemented for loss {type(self)}."
+ )
+ elif value_type == ValueEstimators.TDLambda:
+ self._value_estimator = TDLambdaEstimator(value_network=self.actor_critic, **hp)
+ else:
+ raise NotImplementedError(f"Unknown value type {value_type}")
+ self._value_estimator.set_keys(value=value_key)
+
+
+###############################################################################
+# The ``make_value_estimator`` method can but does not need to be called: if
+# not, the :class:`~torchrl.objectives.LossModule` will query this method with
+# its default estimator.
+#
+# The actor loss method
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# The central piece of an RL algorithm is the training loss for the actor.
+# In the case of DDPG, this function is quite simple: we just need to compute
+# the value associated with an action computed using the policy and optimize
+# the actor weights to maximize this value.
+#
+# When computing this value, we must make sure to take the value parameters out
+# of the graph, otherwise the actor and value loss will be mixed up.
+# For this, the :func:`~torchrl.objectives.utils.hold_out_params` function
+# can be used.
+
+
+def _loss_actor(
+ self,
+ tensordict,
+) -> torch.Tensor:
+ td_copy = tensordict.select(*self.actor_in_keys)
+ # Get an action from the actor network: since we made it functional, we need to pass the params
+ with self.actor_network_params.to_module(self.actor_network):
+ td_copy = self.actor_network(td_copy)
+ # get the value associated with that action
+ with self.value_network_params.detach().to_module(self.value_network):
+ td_copy = self.value_network(td_copy)
+ return -td_copy.get("state_action_value")
+
+
+###############################################################################
+# The value loss method
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# We now need to optimize our value network parameters.
+# To do this, we will rely on the value estimator of our class:
+#
+
+from torchrl.objectives.utils import distance_loss
+
+
+def _loss_value(
+ self,
+ tensordict,
+):
+ td_copy = tensordict.clone()
+
+ # V(s, a)
+ with self.value_network_params.to_module(self.value_network):
+ self.value_network(td_copy)
+ pred_val = td_copy.get("state_action_value").squeeze(-1)
+
+ # we manually reconstruct the parameters of the actor-critic, where the first
+ # set of parameters belongs to the actor and the second to the value function.
+ target_params = TensorDict(
+ {
+ "module": {
+ "0": self.target_actor_network_params,
+ "1": self.target_value_network_params,
+ }
+ },
+ batch_size=self.target_actor_network_params.batch_size,
+ device=self.target_actor_network_params.device,
+ )
+ with target_params.to_module(self.actor_critic):
+ target_value = self.value_estimator.value_estimate(tensordict).squeeze(-1)
+
+ # Computes the value loss: L2, L1 or smooth L1 depending on `self.loss_function`
+ loss_value = distance_loss(pred_val, target_value, loss_function=self.loss_function)
+ td_error = (pred_val - target_value).pow(2)
+
+ return loss_value, td_error, pred_val, target_value
+
+
+###############################################################################
+# Putting things together in a forward call
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The only missing piece is the forward method, which will glue together the
+# value and actor loss, collect the cost values and write them in a ``TensorDict``
+# delivered to the user.
+
+from tensordict import TensorDict, TensorDictBase
+
+
+def _forward(self, input_tensordict: TensorDictBase) -> TensorDict:
+ loss_value, td_error, pred_val, target_value = self.loss_value(
+ input_tensordict,
+ )
+ td_error = td_error.detach()
+ td_error = td_error.unsqueeze(input_tensordict.ndimension())
+ if input_tensordict.device is not None:
+ td_error = td_error.to(input_tensordict.device)
+ input_tensordict.set(
+ "td_error",
+ td_error,
+ inplace=True,
+ )
+ loss_actor = self.loss_actor(input_tensordict)
+ return TensorDict(
+ source={
+ "loss_actor": loss_actor.mean(),
+ "loss_value": loss_value.mean(),
+ "pred_value": pred_val.mean().detach(),
+ "target_value": target_value.mean().detach(),
+ "pred_value_max": pred_val.max().detach(),
+ "target_value_max": target_value.max().detach(),
+ },
+ batch_size=[],
+ )
+
+
+from torchrl.objectives import LossModule
+
+
+class DDPGLoss(LossModule):
+ default_value_estimator = default_value_estimator
+ make_value_estimator = make_value_estimator
+
+ __init__ = _init
+ forward = _forward
+ loss_value = _loss_value
+ loss_actor = _loss_actor
+
+
+###############################################################################
+# Now that we have our loss, we can use it to train a policy to solve a
+# control task.
+#
+# Environment
+# -----------
+#
+# In most algorithms, the first thing that needs to be taken care of is the
+# construction of the environment as it conditions the remainder of the
+# training script.
+#
+# For this example, we will be using the ``"cheetah"`` task. The goal is to make
+# a half-cheetah run as fast as possible.
+#
+# In TorchRL, one can create such a task by relying on ``dm_control`` or ``gym``:
+#
+# .. code-block:: python
+#
+# env = GymEnv("HalfCheetah-v4")
+#
+# or
+#
+# .. code-block:: python
+#
+# env = DMControlEnv("cheetah", "run")
+#
+# By default, these environments disable rendering. Training from states is
+# usually easier than training from images. To keep things simple, we focus
+# on learning from states only. To pass the pixels to the ``tensordicts`` that
+# are collected by :func:`env.step()`, simply pass the ``from_pixels=True``
+# argument to the constructor:
+#
+# .. code-block:: python
+#
+# env = GymEnv("HalfCheetah-v4", from_pixels=True, pixels_only=True)
+#
+# We write a :func:`make_env` helper function that will create an environment
+# with either one of the two backends considered above (``dm-control`` or ``gym``).
+#
+
+from torchrl.envs.libs.dm_control import DMControlEnv
+from torchrl.envs.libs.gym import GymEnv
+
+env_library = None
+env_name = None
+
+
+def make_env(from_pixels=False):
+ """Create a base ``env``."""
+ global env_library
+ global env_name
+
+ if backend == "dm_control":
+ env_name = "cheetah"
+ env_task = "run"
+ env_args = (env_name, env_task)
+ env_library = DMControlEnv
+ elif backend == "gym":
+ env_name = "HalfCheetah-v4"
+ env_args = (env_name,)
+ env_library = GymEnv
+ else:
+ raise NotImplementedError
+
+ env_kwargs = {
+ "device": device,
+ "from_pixels": from_pixels,
+ "pixels_only": from_pixels,
+ "frame_skip": 2,
+ }
+ env = env_library(*env_args, **env_kwargs)
+ return env
+
+
+###############################################################################
+# Transforms
+# ~~~~~~~~~~
+#
+# Now that we have a base environment, we may want to modify its representation
+# to make it more policy-friendly. In TorchRL, transforms are appended to the
+# base environment in a specialized :class:`torchr.envs.TransformedEnv` class.
+#
+# - It is common in DDPG to rescale the reward using some heuristic value. We
+# will multiply the reward by 5 in this example.
+#
+# - If we are using :mod:`dm_control`, it is also important to build an interface
+# between the simulator which works with double precision numbers, and our
+# script which presumably uses single precision ones. This transformation goes
+# both ways: when calling :func:`env.step`, our actions will need to be
+# represented in double precision, and the output will need to be transformed
+# to single precision.
+# The :class:`~torchrl.envs.DoubleToFloat` transform does exactly this: the
+# ``in_keys`` list refers to the keys that will need to be transformed from
+# double to float, while the ``in_keys_inv`` refers to those that need to
+# be transformed to double before being passed to the environment.
+#
+# - We concatenate the state keys together using the :class:`~torchrl.envs.CatTensors`
+# transform.
+#
+# - Finally, we also leave the possibility of normalizing the states: we will
+# take care of computing the normalizing constants later on.
+#
+
+from torchrl.envs import (
+ CatTensors,
+ DoubleToFloat,
+ EnvCreator,
+ InitTracker,
+ ObservationNorm,
+ ParallelEnv,
+ RewardScaling,
+ StepCounter,
+ TransformedEnv,
+)
+
+
+def make_transformed_env(
+ env,
+):
+ """Apply transforms to the ``env`` (such as reward scaling and state normalization)."""
+
+ env = TransformedEnv(env)
+
+ # we append transforms one by one, although we might as well create the
+ # transformed environment using the `env = TransformedEnv(base_env, transforms)`
+ # syntax.
+ env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
+
+ # We concatenate all states into a single "observation_vector"
+ # even if there is a single tensor, it'll be renamed in "observation_vector".
+ # This facilitates the downstream operations as we know the name of the
+ # output tensor.
+ # In some environments (not half-cheetah), there may be more than one
+ # observation vector: in this case this code snippet will concatenate them
+ # all.
+ selected_keys = list(env.observation_spec.keys())
+ out_key = "observation_vector"
+ env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
+
+ # we normalize the states, but for now let's just instantiate a stateless
+ # version of the transform
+ env.append_transform(ObservationNorm(in_keys=[out_key], standard_normal=True))
+
+ env.append_transform(DoubleToFloat())
+
+ env.append_transform(StepCounter(max_frames_per_traj))
+
+ # We need a marker for the start of trajectories for our Ornstein-Uhlenbeck (OU)
+ # exploration:
+ env.append_transform(InitTracker())
+
+ return env
+
+
+###############################################################################
+# Parallel execution
+# ~~~~~~~~~~~~~~~~~~
+#
+# The following helper function allows us to run environments in parallel.
+# Running environments in parallel can significantly speed up the collection
+# throughput. When using transformed environments, we need to choose whether we
+# want to execute the transform individually for each environment, or
+# centralize the data and transform it in batch. Both approaches are easy to
+# code:
+#
+# .. code-block:: python
+#
+# env = ParallelEnv(
+# lambda: TransformedEnv(GymEnv("HalfCheetah-v4"), transforms),
+# num_workers=4
+# )
+# env = TransformedEnv(
+# ParallelEnv(lambda: GymEnv("HalfCheetah-v4"), num_workers=4),
+# transforms
+# )
+#
+# To leverage the vectorization capabilities of PyTorch, we adopt
+# the first method:
+#
+
+
+def parallel_env_constructor(
+ env_per_collector,
+ transform_state_dict,
+):
+ if env_per_collector == 1:
+
+ def make_t_env():
+ env = make_transformed_env(make_env())
+ env.transform[2].init_stats(3)
+ env.transform[2].loc.copy_(transform_state_dict["loc"])
+ env.transform[2].scale.copy_(transform_state_dict["scale"])
+ return env
+
+ env_creator = EnvCreator(make_t_env)
+ return env_creator
+
+ parallel_env = ParallelEnv(
+ num_workers=env_per_collector,
+ create_env_fn=EnvCreator(lambda: make_env()),
+ create_env_kwargs=None,
+ pin_memory=False,
+ )
+ env = make_transformed_env(parallel_env)
+ # we call `init_stats` for a limited number of steps, just to instantiate
+ # the lazy buffers.
+ env.transform[2].init_stats(3, cat_dim=1, reduce_dim=[0, 1])
+ env.transform[2].load_state_dict(transform_state_dict)
+ return env
+
+
+# The backend can be ``gym`` or ``dm_control``
+backend = "gym"
+
+###############################################################################
+# .. note::
+#
+# ``frame_skip`` batches multiple steps together with a single action.
+# If > 1, the other frame counts (for example, frames_per_batch, total_frames)
+# need to be adjusted to have a consistent total number of frames collected
+# across experiments. This is important as raising the frame-skip but keeping the
+# total number of frames unchanged may seem like cheating: all things compared,
+# a dataset of 10M elements collected with a frame-skip of 2 and another with
+# a frame-skip of 1 actually have a ratio of interactions with the environment
+# of 2:1! In a nutshell, one should be cautious about the frame-count of a
+# training script when dealing with frame skipping as this may lead to
+# biased comparisons between training strategies.
+#
+# Scaling the reward helps us control the signal magnitude for a more
+# efficient learning.
+reward_scaling = 5.0
+
+###############################################################################
+# We also define when a trajectory will be truncated. A thousand steps (500 if
+# frame-skip = 2) is a good number to use for the cheetah task:
+
+max_frames_per_traj = 500
+
+###############################################################################
+# Normalization of the observations
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# To compute the normalizing statistics, we run an arbitrary number of random
+# steps in the environment and compute the mean and standard deviation of the
+# collected observations. The :func:`ObservationNorm.init_stats()` method can
+# be used for this purpose. To get the summary statistics, we create a dummy
+# environment and run it for a given number of steps, collect data over a given
+# number of steps and compute its summary statistics.
+#
+
+
def get_env_stats():
    """Collect ObservationNorm summary statistics from a throwaway env.

    Runs ``init_env_steps`` random steps in a freshly built transformed
    environment and returns the resulting ObservationNorm state dict
    (loc/scale) so it can be loaded into other environment instances.
    """
    dummy_env = make_transformed_env(make_env())
    norm_transform = dummy_env.transform[2]
    norm_transform.init_stats(init_env_steps)
    stats = norm_transform.state_dict()
    dummy_env.close()
    return stats
+
+
+###############################################################################
+# Normalization stats
+# ~~~~~~~~~~~~~~~~~~~
# Number of random steps used for stats computation using ``ObservationNorm``
+
+init_env_steps = 5000
+
+transform_state_dict = get_env_stats()
+
+###############################################################################
+# Number of environments in each data collector
+env_per_collector = 4
+
+###############################################################################
+# We pass the stats computed earlier to normalize the output of our
+# environment:
+
+parallel_env = parallel_env_constructor(
+ env_per_collector=env_per_collector,
+ transform_state_dict=transform_state_dict,
+)
+
+
+from torchrl.data import CompositeSpec
+
+###############################################################################
+# Building the model
+# ------------------
+#
+# We now turn to the setup of the model. As we have seen, DDPG requires a
+# value network, trained to estimate the value of a state-action pair, and a
+# parametric actor that learns how to select actions that maximize this value.
+#
+# Recall that building a TorchRL module requires two steps:
+#
+# - writing the :class:`torch.nn.Module` that will be used as network,
+# - wrapping the network in a :class:`tensordict.nn.TensorDictModule` where the
+# data flow is handled by specifying the input and output keys.
+#
+# In more complex scenarios, :class:`tensordict.nn.TensorDictSequential` can
+# also be used.
+#
+#
+# The Q-Value network is wrapped in a :class:`~torchrl.modules.ValueOperator`
# that automatically sets the ``out_keys`` to ``"state_action_value"`` for q-value
# networks and ``"state_value"`` for other value networks.
+#
+# TorchRL provides a built-in version of the DDPG networks as presented in the
+# original paper. These can be found under :class:`~torchrl.modules.DdpgMlpActor`
+# and :class:`~torchrl.modules.DdpgMlpQNet`.
+#
+# Since we use lazy modules, it is necessary to materialize the lazy modules
+# before being able to move the policy from device to device and achieve other
+# operations. Hence, it is good practice to run the modules with a small
+# sample of data. For this purpose, we generate fake data from the
+# environment specs.
+#
+
+from torchrl.modules import (
+ ActorCriticWrapper,
+ DdpgMlpActor,
+ DdpgMlpQNet,
+ OrnsteinUhlenbeckProcessModule,
+ ProbabilisticActor,
+ TanhDelta,
+ ValueOperator,
+)
+
+
def make_ddpg_actor(
    transform_state_dict,
    device="cpu",
):
    """Create the DDPG actor and Q-value networks.

    A proof environment (with normalization stats loaded from
    ``transform_state_dict``) supplies the action spec and a reset
    tensordict used to materialize the lazy layers of both networks.

    Returns a ``(actor, qnet)`` pair, both moved to ``device``.
    """
    proof_environment = make_transformed_env(make_env())
    proof_environment.transform[2].init_stats(3)
    proof_environment.transform[2].load_state_dict(transform_state_dict)

    action_dim = proof_environment.action_spec.shape[-1]

    # policy: observation -> distribution parameters -> TanhDelta action
    policy_module = TensorDictModule(
        DdpgMlpActor(action_dim=action_dim),
        in_keys=["observation_vector"],
        out_keys=["param"],
    )
    actor = ProbabilisticActor(
        policy_module,
        distribution_class=TanhDelta,
        in_keys=["param"],
        spec=CompositeSpec(action=proof_environment.action_spec),
    ).to(device)

    # Q-value network: (observation, action) -> state-action value
    qnet = ValueOperator(
        in_keys=["observation_vector", "action"],
        module=DdpgMlpQNet(),
    ).to(device)

    # run a single fake forward pass to instantiate the lazy modules
    qnet(actor(proof_environment.reset().to(device)))
    return actor, qnet
+
+
# Build the actor and Q-value networks on the training device.
actor, qnet = make_ddpg_actor(
    transform_state_dict=transform_state_dict,
    device=device,
)
+
+###############################################################################
+# Exploration
+# ~~~~~~~~~~~
+#
+# The policy is passed into a :class:`~torchrl.modules.OrnsteinUhlenbeckProcessModule`
+# exploration module, as suggested in the original paper.
+# Let's define the number of frames before OU noise reaches its minimum value
annealing_frames = 1_000_000  # frames over which the OU noise is annealed

# Chain the deterministic actor with an Ornstein-Uhlenbeck noise module to
# obtain the exploration policy used during data collection.
actor_model_explore = TensorDictSequential(
    actor,
    OrnsteinUhlenbeckProcessModule(
        spec=actor.spec.clone(),
        annealing_num_steps=annealing_frames,
    ).to(device),
)
# share parameters across processes when collection runs on CPU workers
if device == torch.device("cpu"):
    actor_model_explore.share_memory()
+
+
+###############################################################################
+# Data collector
+# --------------
+#
+# TorchRL provides specialized classes to help you collect data by executing
+# the policy in the environment. These "data collectors" iteratively compute
+# the action to be executed at a given time, then execute a step in the
+# environment and reset it when required.
+# Data collectors are designed to help developers have a tight control
+# on the number of frames per batch of data, on the (a)sync nature of this
+# collection and on the resources allocated to the data collection (for example
+# GPU, number of workers, and so on).
+#
+# Here we will use
+# :class:`~torchrl.collectors.SyncDataCollector`, a simple, single-process
+# data collector. TorchRL offers other collectors, such as
# :class:`~torchrl.collectors.MultiaSyncDataCollector`, which executes the
+# rollouts in an asynchronous manner (for example, data will be collected while
+# the policy is being optimized, thereby decoupling the training and
+# data collection).
+#
+# The parameters to specify are:
+#
+# - an environment factory or an environment,
+# - the policy,
+# - the total number of frames before the collector is considered empty,
+# - the maximum number of frames per trajectory (useful for non-terminating
+# environments, like ``dm_control`` ones).
+#
+# .. note::
+#
+# The ``max_frames_per_traj`` passed to the collector will have the effect
+# of registering a new :class:`~torchrl.envs.StepCounter` transform
+# with the environment used for inference. We can achieve the same result
+# manually, as we do in this script.
+#
+# One should also pass:
+#
+# - the number of frames in each batch collected,
+# - the number of random steps executed independently from the policy,
+# - the devices used for policy execution
+# - the devices used to store data before the data is passed to the main
+# process.
+#
+# The total frames we will use during training should be around 1M.
+total_frames = 10_000 # 1_000_000
+
+###############################################################################
+# The number of frames returned by the collector at each iteration of the outer
+# loop is equal to the length of each sub-trajectories times the number of
+# environments run in parallel in each collector.
+#
+# In other words, we expect batches from the collector to have a shape
+# ``[env_per_collector, traj_len]`` where
+# ``traj_len=frames_per_batch/env_per_collector``:
+#
traj_len = 200  # length of each sub-trajectory yielded by the collector
frames_per_batch = env_per_collector * traj_len
init_random_frames = 5000  # frames gathered with random actions before using the policy
num_collectors = 2  # NOTE(review): appears unused in this script — TODO confirm
+
from torchrl.collectors import SyncDataCollector
from torchrl.envs import ExplorationType

# Single-process collector running the exploration policy in the parallel env.
collector = SyncDataCollector(
    parallel_env,
    policy=actor_model_explore,
    total_frames=total_frames,
    frames_per_batch=frames_per_batch,
    init_random_frames=init_random_frames,  # purely random actions at first
    reset_at_each_iter=False,
    split_trajs=False,
    device=collector_device,
    exploration_type=ExplorationType.RANDOM,
)
+
+###############################################################################
+# Evaluator: building your recorder object
+# ----------------------------------------
+#
+# As the training data is obtained using some exploration strategy, the true
+# performance of our algorithm needs to be assessed in deterministic mode. We
+# do this using a dedicated class, ``Recorder``, which executes the policy in
+# the environment at a given frequency and returns some statistics obtained
+# from these simulations.
+#
+# The following helper function builds this object:
+from torchrl.trainers import Recorder
+
+
def make_recorder(actor_model_explore, transform_state_dict, record_interval):
    """Build a ``Recorder`` that evaluates the policy deterministically.

    A dedicated transformed environment is created and its normalization
    stats are loaded from ``transform_state_dict`` so that evaluation uses
    the same observation normalization as training.
    """
    eval_env = make_transformed_env(make_env())
    # the normalization transform must be instantiated before its state
    # dict can be loaded
    eval_env.transform[2].init_stats(3)
    eval_env.transform[2].load_state_dict(transform_state_dict)

    return Recorder(
        record_frames=1000,
        policy_exploration=actor_model_explore,
        environment=eval_env,
        exploration_type=ExplorationType.DETERMINISTIC,
        record_interval=record_interval,
    )
+
+
+###############################################################################
# We will be recording the performance every 10 batches collected
record_interval = 10  # evaluate once every 10 collected batches

recorder = make_recorder(
    actor_model_explore, transform_state_dict, record_interval=record_interval
)
+
+from torchrl.data.replay_buffers import (
+ LazyMemmapStorage,
+ PrioritizedSampler,
+ RandomSampler,
+ TensorDictReplayBuffer,
+)
+
+###############################################################################
+# Replay buffer
+# -------------
+#
+# Replay buffers come in two flavors: prioritized (where some error signal
+# is used to give a higher likelihood of sampling to some items than others)
+# and regular, circular experience replay.
+#
+# TorchRL replay buffers are composable: one can pick up the storage, sampling
+# and writing strategies. It is also possible to
+# store tensors on physical memory using a memory-mapped array. The following
+# function takes care of creating the replay buffer with the desired
+# hyperparameters:
+#
+
+from torchrl.envs import RandomCropTensorDict
+
+
def make_replay_buffer(buffer_size, batch_size, random_crop_len, prefetch=3, prb=False):
    """Create a memory-mapped replay buffer, optionally prioritized.

    Sampled trajectories are cropped along the time dimension to
    ``random_crop_len`` steps by a ``RandomCropTensorDict`` transform.
    Storage is memory-mapped under the module-level ``buffer_scratch_dir``.
    """
    sampler = (
        PrioritizedSampler(
            max_capacity=buffer_size,
            alpha=0.7,
            beta=0.5,
        )
        if prb
        else RandomSampler()
    )
    storage = LazyMemmapStorage(
        buffer_size,
        scratch_dir=buffer_scratch_dir,
    )
    return TensorDictReplayBuffer(
        storage=storage,
        batch_size=batch_size,
        sampler=sampler,
        pin_memory=False,
        prefetch=prefetch,
        transform=RandomCropTensorDict(random_crop_len, sample_dim=1),
    )
+
+
+###############################################################################
+# We'll store the replay buffer in a temporary directory on disk
+
import tempfile

# Keep a reference to the TemporaryDirectory object: its directory (and the
# memory-mapped buffer inside it) is deleted once `tmpdir` is finalized.
tmpdir = tempfile.TemporaryDirectory()
buffer_scratch_dir = tmpdir.name
+
+###############################################################################
+# Replay buffer storage and batch size
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# TorchRL replay buffer counts the number of elements along the first dimension.
+# Since we'll be feeding trajectories to our buffer, we need to adapt the buffer
+# size by dividing it by the length of the sub-trajectories yielded by our
+# data collector.
+# Regarding the batch-size, our sampling strategy will consist in sampling
+# trajectories of length ``traj_len=200`` before selecting sub-trajectories
# of length ``random_crop_len=25`` on which the loss will be computed.
+# This strategy balances the choice of storing whole trajectories of a certain
+# length with the need for providing samples with a sufficient heterogeneity
+# to our loss. The following figure shows the dataflow from a collector
+# that gets 8 frames in each batch with 2 environments run in parallel,
+# feeds them to a replay buffer that contains 1000 trajectories and
+# samples sub-trajectories of 2 time steps each.
+#
+# .. figure:: /_static/img/replaybuffer_traj.png
+# :alt: Storing trajectories in the replay buffer
+#
+# Let's start with the number of frames stored in the buffer
+
+
def ceil_div(x, y):
    """Integer ceiling division: the smallest integer >= x / y.

    Bug fix: the previous body, ``-x // (-y)``, parses as ``(-x) // (-y)``
    because unary minus binds tighter than ``//`` in Python, which yields
    ``floor(x / y)`` for positive inputs — not the ceiling the name promises.
    The negate-outside form below is the standard ceiling-division idiom.
    (Both call sites in this script use exactly divisible operands, so their
    results are unchanged.)
    """
    return -(-x // y)
+
+
# frame budget for the buffer
buffer_size = 1_000_000
# the buffer counts elements along dim 0 (whole trajectories), so convert
# the frame budget into a trajectory count
buffer_size = ceil_div(buffer_size, traj_len)
+
+###############################################################################
+# Prioritized replay buffer is disabled by default
+prb = False
+
+###############################################################################
+# We also need to define how many updates we'll be doing per batch of data
+# collected. This is known as the update-to-data or ``UTD`` ratio:
+update_to_data = 64
+
+###############################################################################
+# We'll be feeding the loss with trajectories of length 25:
+random_crop_len = 25
+
+###############################################################################
+# In the original paper, the authors perform one update with a batch of 64
+# elements for each frame collected. Here, we reproduce the same ratio
+# but while realizing several updates at each batch collection. We
+# adapt our batch-size to achieve the same number of update-per-frame ratio:
+
# keep the paper's update-per-frame ratio:
# batch_size * update_to_data * random_crop_len == 64 * frames_per_batch
batch_size = ceil_div(64 * frames_per_batch, update_to_data * random_crop_len)

replay_buffer = make_replay_buffer(
    buffer_size=buffer_size,
    batch_size=batch_size,
    random_crop_len=random_crop_len,
    prefetch=3,
    prb=prb,
)
+
+###############################################################################
+# Loss module construction
+# ------------------------
+#
+# We build our loss module with the actor and ``qnet`` we've just created.
+# Because we have target parameters to update, we _must_ create a target network
+# updater.
+#
+
gamma = 0.99  # discount factor
lmbda = 0.9  # TD(lambda) trace-decay parameter
tau = 0.001  # Decay factor for the target network

loss_module = DDPGLoss(actor, qnet)

###############################################################################
# let's use the TD(lambda) estimator!
loss_module.make_value_estimator(ValueEstimators.TDLambda, gamma=gamma, lmbda=lmbda, device=device)
+
+###############################################################################
+# .. note::
+# Off-policy usually dictates a TD(0) estimator. Here, we use a TD(:math:`\lambda`)
+# estimator, which will introduce some bias as the trajectory that follows
+# a certain state has been collected with an outdated policy.
+# This trick, as the multi-step trick that can be used during data collection,
+# are alternative versions of "hacks" that we usually find to work well in
+# practice despite the fact that they introduce some bias in the return
+# estimates.
+#
+# Target network updater
+# ~~~~~~~~~~~~~~~~~~~~~~
+#
+# Target networks are a crucial part of off-policy RL algorithms.
+# Updating the target network parameters is made easy thanks to the
+# :class:`~torchrl.objectives.HardUpdate` and :class:`~torchrl.objectives.SoftUpdate`
+# classes. They're built with the loss module as argument, and the update is
+# achieved via a call to `updater.step()` at the appropriate location in the
+# training loop.
+
from torchrl.objectives.utils import SoftUpdate

# eps = 1 - tau: fraction of the target parameters preserved at each update
target_net_updater = SoftUpdate(loss_module, eps=1 - tau)
+
+###############################################################################
+# Optimizer
+# ~~~~~~~~~
+#
+# Finally, we will use the Adam optimizer for the policy and value network:
+
from torch import optim

# Separate optimizers: the actor uses a smaller learning rate and no weight
# decay; the value network uses a larger rate with weight decay.
optimizer_actor = optim.Adam(
    loss_module.actor_network_params.values(True, True), lr=1e-4, weight_decay=0.0
)
optimizer_value = optim.Adam(
    loss_module.value_network_params.values(True, True), lr=1e-3, weight_decay=1e-2
)
# number of batches the collector will yield in total
total_collection_steps = total_frames // frames_per_batch
+
+###############################################################################
+# Time to train the policy
+# ------------------------
+#
+# The training loop is pretty straightforward now that we have built all the
+# modules we need.
+#
+
rewards = []  # (iteration, mean training reward) pairs
rewards_eval = []  # (iteration, evaluation return) pairs

# Main loop

collected_frames = 0
pbar = tqdm.tqdm(total=total_frames)
r0 = None  # reward of the very first batch, kept as a baseline for logging
for i, tensordict in enumerate(collector):

    # update weights of the inference policy
    collector.update_policy_weights_()

    if r0 is None:
        r0 = tensordict["next", "reward"].mean().item()
    pbar.update(tensordict.numel())

    # extend the replay buffer with the new data
    current_frames = tensordict.numel()
    collected_frames += current_frames
    replay_buffer.extend(tensordict.cpu())

    # optimization steps (only once the initial random-frame phase is over)
    if collected_frames >= init_random_frames:
        for _ in range(update_to_data):
            # sample from replay buffer
            sampled_tensordict = replay_buffer.sample().to(device)

            # Compute loss
            loss_dict = loss_module(sampled_tensordict)

            # optimize: actor first, then the value network, each with its
            # own optimizer and gradient clipping
            loss_dict["loss_actor"].backward()
            gn1 = torch.nn.utils.clip_grad_norm_(
                loss_module.actor_network_params.values(True, True), 10.0
            )
            optimizer_actor.step()
            optimizer_actor.zero_grad()

            loss_dict["loss_value"].backward()
            gn2 = torch.nn.utils.clip_grad_norm_(
                loss_module.value_network_params.values(True, True), 10.0
            )
            optimizer_value.step()
            optimizer_value.zero_grad()

            # combined gradient norm, for logging only
            gn = (gn1**2 + gn2**2) ** 0.5

            # update priority
            if prb:
                replay_buffer.update_tensordict_priority(sampled_tensordict)
            # update target network
            target_net_updater.step()

    rewards.append(
        (
            i,
            tensordict["next", "reward"].mean().item(),
        )
    )
    # the recorder only produces output every `record_interval` calls;
    # otherwise it returns None
    td_record = recorder(None)
    if td_record is not None:
        rewards_eval.append((i, td_record["r_evaluation"].item()))
    # `loss_dict`, `gn` and `sampled_tensordict` below refer to the most
    # recent optimization step; the `collected_frames >= init_random_frames`
    # guard guarantees at least one such step has run
    if len(rewards_eval) and collected_frames >= init_random_frames:
        target_value = loss_dict["target_value"].item()
        loss_value = loss_dict["loss_value"].item()
        loss_actor = loss_dict["loss_actor"].item()
        rn = sampled_tensordict["next", "reward"].mean().item()
        rs = sampled_tensordict["next", "reward"].std().item()
        pbar.set_description(
            f"reward: {rewards[-1][1]: 4.2f} (r0 = {r0: 4.2f}), "
            f"reward eval: reward: {rewards_eval[-1][1]: 4.2f}, "
            f"reward normalized={rn :4.2f}/{rs :4.2f}, "
            f"grad norm={gn: 4.2f}, "
            f"loss_value={loss_value: 4.2f}, "
            f"loss_actor={loss_actor: 4.2f}, "
            f"target value: {target_value: 4.2f}"
        )

    # update the exploration strategy (anneals the OU noise)
    actor_model_explore[1].step(current_frames)

collector.shutdown()
del collector
+
+###############################################################################
+# Experiment results
+# ------------------
+#
+# We make a simple plot of the average rewards during training. We can observe
+# that our policy learned quite well to solve the task.
+#
+# .. note::
+# As already mentioned above, to get a more reasonable performance,
+# use a greater value for ``total_frames`` for example, 1M.
+
from matplotlib import pyplot as plt

plt.figure()
# unzip the (iteration, reward) pairs into x/y series
plt.plot(*zip(*rewards), label="training")
plt.plot(*zip(*rewards_eval), label="eval")
plt.legend()
plt.xlabel("iter")
plt.ylabel("reward")
plt.tight_layout()
+
+###############################################################################
+# Conclusion
+# ----------
+#
+# In this tutorial, we have learned how to code a loss module in TorchRL given
+# the concrete example of DDPG.
+#
+# The key takeaways are:
+#
+# - How to use the :class:`~torchrl.objectives.LossModule` class to code up a new
+# loss component;
+# - How to use (or not) a target network, and how to update its parameters;
+# - How to create an optimizer associated with a loss module.
+#
+# Next Steps
+# ----------
+#
+# To iterate further on this loss module we might consider:
+#
+# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module `_.)
+# - Allowing flexible TensorDict keys.
+#
diff --git a/advanced_source/cpp_autograd.rst b/advanced_source/cpp_autograd.rst
new file mode 100644
index 00000000000..51e5e0b358f
--- /dev/null
+++ b/advanced_source/cpp_autograd.rst
@@ -0,0 +1,437 @@
+Autograd in C++ Frontend
+========================
+
+The ``autograd`` package is crucial for building highly flexible and dynamic neural
+networks in PyTorch. Most of the autograd APIs in PyTorch Python frontend are also available
+in C++ frontend, allowing easy translation of autograd code from Python to C++.
+
In this tutorial, we explore several examples of doing autograd in the PyTorch C++ frontend.
+Note that this tutorial assumes that you already have a basic understanding of
+autograd in Python frontend. If that's not the case, please first read
+`Autograd: Automatic Differentiation `_.
+
+Basic autograd operations
+-------------------------
+
+(Adapted from `this tutorial `_)
+
+Create a tensor and set ``torch::requires_grad()`` to track computation with it
+
+.. code-block:: cpp
+
+ auto x = torch::ones({2, 2}, torch::requires_grad());
+ std::cout << x << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 1 1
+ 1 1
+ [ CPUFloatType{2,2} ]
+
+
+Do a tensor operation:
+
+.. code-block:: cpp
+
+ auto y = x + 2;
+ std::cout << y << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 3 3
+ 3 3
+ [ CPUFloatType{2,2} ]
+
+``y`` was created as a result of an operation, so it has a ``grad_fn``.
+
+.. code-block:: cpp
+
+ std::cout << y.grad_fn()->name() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ AddBackward1
+
+Do more operations on ``y``
+
+.. code-block:: cpp
+
+ auto z = y * y * 3;
+ auto out = z.mean();
+
+ std::cout << z << std::endl;
+ std::cout << z.grad_fn()->name() << std::endl;
+ std::cout << out << std::endl;
+ std::cout << out.grad_fn()->name() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 27 27
+ 27 27
+ [ CPUFloatType{2,2} ]
+ MulBackward1
+ 27
+ [ CPUFloatType{} ]
+ MeanBackward0
+
+
+``.requires_grad_( ... )`` changes an existing tensor's ``requires_grad`` flag in-place.
+
+.. code-block:: cpp
+
+ auto a = torch::randn({2, 2});
+ a = ((a * 3) / (a - 1));
+ std::cout << a.requires_grad() << std::endl;
+
+ a.requires_grad_(true);
+ std::cout << a.requires_grad() << std::endl;
+
+ auto b = (a * a).sum();
+ std::cout << b.grad_fn()->name() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ false
+ true
+ SumBackward0
+
+Let's backprop now. Because ``out`` contains a single scalar, ``out.backward()``
+is equivalent to ``out.backward(torch::tensor(1.))``.
+
+.. code-block:: cpp
+
+ out.backward();
+
+Print gradients d(out)/dx
+
+.. code-block:: cpp
+
+ std::cout << x.grad() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 4.5000 4.5000
+ 4.5000 4.5000
+ [ CPUFloatType{2,2} ]
+
+You should have got a matrix of ``4.5``. For explanations on how we arrive at this value,
+please see `the corresponding section in this tutorial `_.
+
+Now let's take a look at an example of vector-Jacobian product:
+
+.. code-block:: cpp
+
+ x = torch::randn(3, torch::requires_grad());
+
+ y = x * 2;
+ while (y.norm().item() < 1000) {
+ y = y * 2;
+ }
+
+ std::cout << y << std::endl;
+ std::cout << y.grad_fn()->name() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ -1021.4020
+ 314.6695
+ -613.4944
+ [ CPUFloatType{3} ]
+ MulBackward1
+
+If we want the vector-Jacobian product, pass the vector to ``backward`` as argument:
+
+.. code-block:: cpp
+
+ auto v = torch::tensor({0.1, 1.0, 0.0001}, torch::kFloat);
+ y.backward(v);
+
+ std::cout << x.grad() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 102.4000
+ 1024.0000
+ 0.1024
+ [ CPUFloatType{3} ]
+
+You can also stop autograd from tracking history on tensors that require gradients
+either by putting ``torch::NoGradGuard`` in a code block
+
+.. code-block:: cpp
+
+ std::cout << x.requires_grad() << std::endl;
+ std::cout << x.pow(2).requires_grad() << std::endl;
+
+ {
+ torch::NoGradGuard no_grad;
+ std::cout << x.pow(2).requires_grad() << std::endl;
+ }
+
+
+Out:
+
+.. code-block:: shell
+
+ true
+ true
+ false
+
+Or by using ``.detach()`` to get a new tensor with the same content but that does
+not require gradients:
+
+.. code-block:: cpp
+
+ std::cout << x.requires_grad() << std::endl;
+ y = x.detach();
+ std::cout << y.requires_grad() << std::endl;
+ std::cout << x.eq(y).all().item() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ true
+ false
+ true
+
+For more information on C++ tensor autograd APIs such as ``grad`` / ``requires_grad`` /
+``is_leaf`` / ``backward`` / ``detach`` / ``detach_`` / ``register_hook`` / ``retain_grad``,
+please see `the corresponding C++ API docs `_.
+
+Computing higher-order gradients in C++
+---------------------------------------
+
+One of the applications of higher-order gradients is calculating gradient penalty.
+Let's see an example of it using ``torch::autograd::grad``:
+
+.. code-block:: cpp
+
+ #include
+
+ auto model = torch::nn::Linear(4, 3);
+
+ auto input = torch::randn({3, 4}).requires_grad_(true);
+ auto output = model(input);
+
+ // Calculate loss
+ auto target = torch::randn({3, 3});
+ auto loss = torch::nn::MSELoss()(output, target);
+
+ // Use norm of gradients as penalty
+ auto grad_output = torch::ones_like(output);
+ auto gradient = torch::autograd::grad({output}, {input}, /*grad_outputs=*/{grad_output}, /*create_graph=*/true)[0];
+ auto gradient_penalty = torch::pow((gradient.norm(2, /*dim=*/1) - 1), 2).mean();
+
+ // Add gradient penalty to loss
+ auto combined_loss = loss + gradient_penalty;
+ combined_loss.backward();
+
+ std::cout << input.grad() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ -0.1042 -0.0638 0.0103 0.0723
+ -0.2543 -0.1222 0.0071 0.0814
+ -0.1683 -0.1052 0.0355 0.1024
+ [ CPUFloatType{3,4} ]
+
+Please see the documentation for ``torch::autograd::backward``
+(`link `_)
+and ``torch::autograd::grad``
+(`link `_)
+for more information on how to use them.
+
+Using custom autograd function in C++
+-------------------------------------
+
+(Adapted from `this tutorial `_)
+
+Adding a new elementary operation to ``torch::autograd`` requires implementing a new ``torch::autograd::Function``
+subclass for each operation. ``torch::autograd::Function`` s are what ``torch::autograd``
+uses to compute the results and gradients, and encode the operation history. Every
+new function requires you to implement 2 methods: ``forward`` and ``backward``, and
+please see `this link `_
+for the detailed requirements.
+
+Below you can find code for a ``Linear`` function from ``torch::nn``:
+
+.. code-block:: cpp
+
+ #include
+
+ using namespace torch::autograd;
+
+ // Inherit from Function
+ class LinearFunction : public Function {
+ public:
+ // Note that both forward and backward are static functions
+
+ // bias is an optional argument
+ static torch::Tensor forward(
+ AutogradContext *ctx, torch::Tensor input, torch::Tensor weight, torch::Tensor bias = torch::Tensor()) {
+ ctx->save_for_backward({input, weight, bias});
+ auto output = input.mm(weight.t());
+ if (bias.defined()) {
+ output += bias.unsqueeze(0).expand_as(output);
+ }
+ return output;
+ }
+
+ static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
+ auto saved = ctx->get_saved_variables();
+ auto input = saved[0];
+ auto weight = saved[1];
+ auto bias = saved[2];
+
+ auto grad_output = grad_outputs[0];
+ auto grad_input = grad_output.mm(weight);
+ auto grad_weight = grad_output.t().mm(input);
+ auto grad_bias = torch::Tensor();
+ if (bias.defined()) {
+ grad_bias = grad_output.sum(0);
+ }
+
+ return {grad_input, grad_weight, grad_bias};
+ }
+ };
+
+Then, we can use the ``LinearFunction`` in the following way:
+
+.. code-block:: cpp
+
+ auto x = torch::randn({2, 3}).requires_grad_();
+ auto weight = torch::randn({4, 3}).requires_grad_();
+ auto y = LinearFunction::apply(x, weight);
+ y.sum().backward();
+
+ std::cout << x.grad() << std::endl;
+ std::cout << weight.grad() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 0.5314 1.2807 1.4864
+ 0.5314 1.2807 1.4864
+ [ CPUFloatType{2,3} ]
+ 3.7608 0.9101 0.0073
+ 3.7608 0.9101 0.0073
+ 3.7608 0.9101 0.0073
+ 3.7608 0.9101 0.0073
+ [ CPUFloatType{4,3} ]
+
+Here, we give an additional example of a function that is parametrized by non-tensor arguments:
+
+.. code-block:: cpp
+
+ #include
+
+ using namespace torch::autograd;
+
+ class MulConstant : public Function {
+ public:
+ static torch::Tensor forward(AutogradContext *ctx, torch::Tensor tensor, double constant) {
+ // ctx is a context object that can be used to stash information
+ // for backward computation
+ ctx->saved_data["constant"] = constant;
+ return tensor * constant;
+ }
+
+ static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
+ // We return as many input gradients as there were arguments.
+ // Gradients of non-tensor arguments to forward must be `torch::Tensor()`.
+ return {grad_outputs[0] * ctx->saved_data["constant"].toDouble(), torch::Tensor()};
+ }
+ };
+
+Then, we can use the ``MulConstant`` in the following way:
+
+.. code-block:: cpp
+
+ auto x = torch::randn({2}).requires_grad_();
+ auto y = MulConstant::apply(x, 5.5);
+ y.sum().backward();
+
+ std::cout << x.grad() << std::endl;
+
+Out:
+
+.. code-block:: shell
+
+ 5.5000
+ 5.5000
+ [ CPUFloatType{2} ]
+
+For more information on ``torch::autograd::Function``, please see
+`its documentation `_.
+
+Translating autograd code from Python to C++
+--------------------------------------------
+
+On a high level, the easiest way to use autograd in C++ is to have working
+autograd code in Python first, and then translate your autograd code from Python to
+C++ using the following table:
+
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Python | C++ |
++================================+========================================================================================================================================================================+
+| ``torch.autograd.backward`` | ``torch::autograd::backward`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.autograd.grad`` | ``torch::autograd::grad`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.detach`` | ``torch::Tensor::detach`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.detach_`` | ``torch::Tensor::detach_`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.backward`` | ``torch::Tensor::backward`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.register_hook`` | ``torch::Tensor::register_hook`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.requires_grad`` | ``torch::Tensor::requires_grad_`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.retain_grad`` | ``torch::Tensor::retain_grad`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.grad`` | ``torch::Tensor::grad`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.grad_fn`` | ``torch::Tensor::grad_fn`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.set_data`` | ``torch::Tensor::set_data`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.data`` | ``torch::Tensor::data`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.output_nr`` | ``torch::Tensor::output_nr`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| ``torch.Tensor.is_leaf`` | ``torch::Tensor::is_leaf`` (`link `_) |
++--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+After translation, most of your Python autograd code should just work in C++.
+If that's not the case, please file a bug report at `GitHub issues `_
+and we will fix it as soon as possible.
+
+Conclusion
+----------
+
+You should now have a good overview of PyTorch's C++ autograd API.
+You can find the code examples displayed in this note `here
+`_. As always, if you run into any
+problems or have questions, you can use our `forum `_
+or `GitHub issues `_ to get in touch.
diff --git a/advanced_source/cpp_custom_ops.rst b/advanced_source/cpp_custom_ops.rst
new file mode 100644
index 00000000000..512c39b2a68
--- /dev/null
+++ b/advanced_source/cpp_custom_ops.rst
@@ -0,0 +1,582 @@
+.. _cpp-custom-ops-tutorial:
+
+Custom C++ and CUDA Operators
+=============================
+
+**Author:** `Richard Zou `_
+
+.. grid:: 2
+
+ .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn
+ :class-card: card-prerequisites
+
+ * How to integrate custom operators written in C++/CUDA with PyTorch
+ * How to test custom operators using ``torch.library.opcheck``
+
+ .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites
+ :class-card: card-prerequisites
+
+ * PyTorch 2.4 or later
+ * Basic understanding of C++ and CUDA programming
+
+.. note::
+
+ This tutorial will also work on AMD ROCm with no additional modifications.
+
+PyTorch offers a large library of operators that work on Tensors (e.g. torch.add, torch.sum, etc).
+However, you may wish to bring a new custom operator to PyTorch. This tutorial demonstrates the
+blessed path to authoring a custom operator written in C++/CUDA.
+
+For our tutorial, we’ll demonstrate how to author a fused multiply-add C++
+and CUDA operator that composes with PyTorch subsystems. The semantics of
+the operation are as follows:
+
+.. code-block:: python
+
+ def mymuladd(a: Tensor, b: Tensor, c: float):
+ return a * b + c
+
+You can find the end-to-end working example for this tutorial
+`here `_ .
+
+Setting up the Build System
+---------------------------
+
+If you are developing custom C++/CUDA code, it must be compiled.
+Note that if you’re interfacing with a Python library that already has bindings
+to precompiled C++/CUDA code, you might consider writing a custom Python operator
+instead (:ref:`python-custom-ops-tutorial`).
+
+Use `torch.utils.cpp_extension `_
+to compile custom C++/CUDA code for use with PyTorch.
+C++ extensions may be built either "ahead of time" with setuptools, or "just in time"
+via `load_inline `_;
+we’ll focus on the "ahead of time" flavor.
+
+Using ``cpp_extension`` is as simple as writing the following ``setup.py``:
+
+.. code-block:: python
+
+ from setuptools import setup, Extension
+ from torch.utils import cpp_extension
+
+ setup(name="extension_cpp",
+ ext_modules=[
+ cpp_extension.CppExtension(
+ "extension_cpp",
+ ["muladd.cpp"],
+ # define Py_LIMITED_API with min version 3.9 to expose only the stable
+ # limited API subset from Python.h
+ extra_compile_args={"cxx": ["-DPy_LIMITED_API=0x03090000"]},
+ py_limited_api=True)], # Build 1 wheel across multiple Python versions
+ cmdclass={'build_ext': cpp_extension.BuildExtension},
+ options={"bdist_wheel": {"py_limited_api": "cp39"}} # 3.9 is minimum supported Python version
+ )
+
+If you need to compile CUDA code (for example, ``.cu`` files), then instead use
+`torch.utils.cpp_extension.CUDAExtension `_.
+Please see `extension-cpp `_ for an
+example for how this is set up.
+
+The above example represents what we refer to as a CPython agnostic wheel, meaning we are
+building a single wheel that can be run across multiple CPython versions (similar to pure
+Python packages). CPython agnosticism is desirable in minimizing the number of wheels your
+custom library needs to support and release. The minimum version we'd like to support is
+3.9, since it is the oldest supported version currently, so we use the corresponding hexcode
+and specifier throughout the setup code. We suggest building the extension in the same
+environment as the minimum CPython version you'd like to support to minimize unknown behavior,
+so, here, we build the extension in a CPython 3.9 environment. When built, this single wheel
+will be runnable in any CPython environment 3.9+. To achieve this, there are three key lines
+to note.
+
+The first is the specification of ``Py_LIMITED_API`` in ``extra_compile_args`` to the
+minimum CPython version you would like to support:
+
+.. code-block:: python
+
+ extra_compile_args={"cxx": ["-DPy_LIMITED_API=0x03090000"]},
+
+Defining the ``Py_LIMITED_API`` flag helps verify that the extension is in fact
+only using the `CPython Stable Limited API `_,
+which is a requirement for building a CPython agnostic wheel. If this requirement
+is not met, it is possible to build a wheel that looks CPython agnostic but will crash,
+or worse, be silently incorrect, in another CPython environment. Take care to avoid
+using unstable CPython APIs, for example APIs from libtorch_python (in particular
+pytorch/python bindings), and to only use APIs from libtorch (ATen objects, operators
+and the dispatcher). We strongly recommend defining the ``Py_LIMITED_API`` flag to
+help ascertain the extension is compliant and safe as a CPython agnostic wheel. Note that
+defining this flag is not a full guarantee that the built wheel is CPython agnostic, but
+it is better than the wild wild west. There are several caveats mentioned in the
+`Python docs `_,
+and you should test and verify yourself that the wheel is truly agnostic for the relevant
+CPython versions.
+
+The second and third lines specifying ``py_limited_api`` inform setuptools that you intend
+to build a CPython agnostic wheel and will influence the naming of the wheel accordingly:
+
+.. code-block:: python
+
+ setup(name="extension_cpp",
+ ext_modules=[
+ cpp_extension.CppExtension(
+ ...,
+ py_limited_api=True)], # Build 1 wheel across multiple Python versions
+ ...,
+ options={"bdist_wheel": {"py_limited_api": "cp39"}} # 3.9 is minimum supported Python version
+ )
+
+It is necessary to specify ``py_limited_api=True`` as an argument to CppExtension/
+CUDAExtension and also as an option to the ``"bdist_wheel"`` command with the minimal
+supported CPython version (in this case, 3.9). Consequently, the ``setup`` in our
+tutorial would build one properly named wheel that could be installed across multiple
+CPython versions ``>=3.9``.
+
+If your extension uses CPython APIs outside the stable limited set, then you cannot
+build a CPython agnostic wheel! You should build one wheel per CPython version instead,
+like so:
+
+.. code-block:: python
+
+ from setuptools import setup, Extension
+ from torch.utils import cpp_extension
+
+ setup(name="extension_cpp",
+ ext_modules=[
+ cpp_extension.CppExtension(
+ "extension_cpp",
+ ["muladd.cpp"])],
+ cmdclass={'build_ext': cpp_extension.BuildExtension},
+ )
+
+
+Defining the custom op and adding backend implementations
+---------------------------------------------------------
+First, let's write a C++ function that computes ``mymuladd``:
+
+.. code-block:: cpp
+
+ at::Tensor mymuladd_cpu(const at::Tensor& a, const at::Tensor& b, double c) {
+ TORCH_CHECK(a.sizes() == b.sizes());
+ TORCH_CHECK(a.dtype() == at::kFloat);
+ TORCH_CHECK(b.dtype() == at::kFloat);
+ TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CPU);
+ TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CPU);
+ at::Tensor a_contig = a.contiguous();
+ at::Tensor b_contig = b.contiguous();
+ at::Tensor result = torch::empty(a_contig.sizes(), a_contig.options());
+ const float* a_ptr = a_contig.data_ptr<float>();
+ const float* b_ptr = b_contig.data_ptr<float>();
+ float* result_ptr = result.data_ptr<float>();
+ for (int64_t i = 0; i < result.numel(); i++) {
+ result_ptr[i] = a_ptr[i] * b_ptr[i] + c;
+ }
+ return result;
+ }
+
+In order to use this from PyTorch’s Python frontend, we need to register it
+as a PyTorch operator using the ``TORCH_LIBRARY`` API. This will automatically
+bind the operator to Python.
+
+Operator registration is a two step-process:
+
+- **Defining the operator** - This step ensures that PyTorch is aware of the new operator.
+- **Registering backend implementations** - In this step, implementations for various
+ backends, such as CPU and CUDA, are associated with the operator.
+
+Defining an operator
+^^^^^^^^^^^^^^^^^^^^
+To define an operator, follow these steps:
+
+1. select a namespace for an operator. We recommend the namespace be the name of your top-level
+ project; we’ll use "extension_cpp" in our tutorial.
+2. provide a schema string that specifies the input/output types of the operator and whether
+ any input Tensors will be mutated. We support more types in addition to Tensor and float;
+ please see `The Custom Operators Manual `_
+ for more details.
+
+ * If you are authoring an operator that can mutate its input Tensors, please see here
+ (:ref:`mutable-ops`) for how to specify that.
+
+.. code-block:: cpp
+
+ TORCH_LIBRARY(extension_cpp, m) {
+ // Note that "float" in the schema corresponds to the C++ double type
+ // and the Python float type.
+ m.def("mymuladd(Tensor a, Tensor b, float c) -> Tensor");
+ }
+
+This makes the operator available from Python via ``torch.ops.extension_cpp.mymuladd``.
+
+Registering backend implementations for an operator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Use ``TORCH_LIBRARY_IMPL`` to register a backend implementation for the operator.
+
+.. code-block:: cpp
+
+ TORCH_LIBRARY_IMPL(extension_cpp, CPU, m) {
+ m.impl("mymuladd", &mymuladd_cpu);
+ }
+
+If you also have a CUDA implementation of ``mymuladd``, you can register it
+in a separate ``TORCH_LIBRARY_IMPL`` block:
+
+.. code-block:: cpp
+
+ __global__ void muladd_kernel(int numel, const float* a, const float* b, float c, float* result) {
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
+ if (idx < numel) result[idx] = a[idx] * b[idx] + c;
+ }
+
+ at::Tensor mymuladd_cuda(const at::Tensor& a, const at::Tensor& b, double c) {
+ TORCH_CHECK(a.sizes() == b.sizes());
+ TORCH_CHECK(a.dtype() == at::kFloat);
+ TORCH_CHECK(b.dtype() == at::kFloat);
+ TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CUDA);
+ TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CUDA);
+ at::Tensor a_contig = a.contiguous();
+ at::Tensor b_contig = b.contiguous();
+ at::Tensor result = torch::empty(a_contig.sizes(), a_contig.options());
+ const float* a_ptr = a_contig.data_ptr<float>();
+ const float* b_ptr = b_contig.data_ptr<float>();
+ float* result_ptr = result.data_ptr<float>();
+
+ int numel = a_contig.numel();
+ muladd_kernel<<<(numel+255)/256, 256>>>(numel, a_ptr, b_ptr, c, result_ptr);
+ return result;
+ }
+
+ TORCH_LIBRARY_IMPL(extension_cpp, CUDA, m) {
+ m.impl("mymuladd", &mymuladd_cuda);
+ }
+
+Adding ``torch.compile`` support for an operator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To add ``torch.compile`` support for an operator, we must add a FakeTensor kernel (also
+known as a "meta kernel" or "abstract impl"). FakeTensors are Tensors that have
+metadata (such as shape, dtype, device) but no data: the FakeTensor kernel for an
+operator specifies how to compute the metadata of output tensors given the metadata of input tensors.
+The FakeTensor kernel should return dummy Tensors of your choice with
+the correct Tensor metadata (shape/strides/``dtype``/device).
+
+We recommend that this be done from Python via the ``torch.library.register_fake`` API,
+though it is possible to do this from C++ as well (see
+`The Custom Operators Manual `_
+for more details).
+
+.. code-block:: python
+
+ # Important: the C++ custom operator definitions should be loaded first
+ # before calling ``torch.library`` APIs that add registrations for the
+ # C++ custom operator(s). The following import loads our
+ # C++ custom operator definitions.
+ # Note that if you are striving for Python agnosticism, you should use
+ # the ``load_library(...)`` API call instead. See the next section for
+ # more details.
+ from . import _C
+
+ @torch.library.register_fake("extension_cpp::mymuladd")
+ def _(a, b, c):
+ torch._check(a.shape == b.shape)
+ torch._check(a.dtype == torch.float)
+ torch._check(b.dtype == torch.float)
+ torch._check(a.device == b.device)
+ return torch.empty_like(a)
+
+Setting up hybrid Python/C++ registration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In this tutorial, we defined a custom operator in C++, added CPU/CUDA
+implementations in C++, and added ``FakeTensor`` kernels and backward formulas
+in Python. The order in which these registrations are loaded (or imported)
+matters (importing in the wrong order will lead to an error).
+
+To use the custom operator with hybrid Python/C++ registrations, we must
+first load the C++ library that holds the custom operator definition
+and then call the ``torch.library`` registration APIs. This can happen in
+three ways:
+
+
+1. The first way to load the C++ library that holds the custom operator definition
+ is to define a dummy Python module for _C. Then, in Python, when you import the
+ module with ``import _C``, the ``.so`` files corresponding to the extension will
+ be loaded and the ``TORCH_LIBRARY`` and ``TORCH_LIBRARY_IMPL`` static initializers
+ will run. One can create a dummy Python module with ``PYBIND11_MODULE`` like below,
+ but you will notice that this does not compile with ``Py_LIMITED_API``, because
+ ``pybind11`` does not promise to only use the stable limited CPython API! With
+ the below code, you sadly cannot build a CPython agnostic wheel for your extension!
+ (Foreshadowing: I wonder what the second way is ;) ).
+
+.. code-block:: cpp
+
+ // in, say, not_agnostic/csrc/extension_BAD.cpp
+ #include <pybind11/pybind11.h>
+
+ PYBIND11_MODULE("_C", m) {}
+
+.. code-block:: python
+
+ # in, say, extension/__init__.py
+ from . import _C
+
+2. In this tutorial, because we value being able to build a single wheel across multiple
+ CPython versions, we will replace the unstable ``PYBIND11`` call with stable API calls.
+ The below code compiles with ``-DPy_LIMITED_API=0x03090000`` and successfully creates
+ a dummy Python module for our ``_C`` extension so that it can be imported from Python.
+ See `extension_cpp/__init__.py `_
+ and `extension_cpp/csrc/muladd.cpp `_
+ for more details:
+
+.. code-block:: cpp
+
+ #include <Python.h>
+
+ extern "C" {
+ /* Creates a dummy empty _C module that can be imported from Python.
+ The import from Python will load the .so consisting of this file
+ in this extension, so that the TORCH_LIBRARY static initializers
+ below are run. */
+ PyObject* PyInit__C(void)
+ {
+ static struct PyModuleDef module_def = {
+ PyModuleDef_HEAD_INIT,
+ "_C", /* name of module */
+ NULL, /* module documentation, may be NULL */
+ -1, /* size of per-interpreter state of the module,
+ or -1 if the module keeps state in global variables. */
+ NULL, /* methods */
+ };
+ return PyModule_Create(&module_def);
+ }
+ }
+
+.. code-block:: python
+
+ # in, say, extension/__init__.py
+ from . import _C
+
+3. If you want to avoid ``Python.h`` entirely in your C++ custom operator, you may
+ use ``torch.ops.load_library("/path/to/library.so")`` in Python to load the ``.so``
+ file(s) compiled from the extension. Note that, with this method, there is no ``_C``
+ Python module created for the extension so you cannot call ``import _C`` from Python.
+ Instead of relying on the import statement to trigger the custom operators to be
+ registered, ``torch.ops.load_library("/path/to/library.so")`` will do the trick.
+ The challenge then is shifted towards understanding where the ``.so`` files are
+ located so that you can load them, which is not always trivial:
+
+.. code-block:: python
+
+ import torch
+ from pathlib import Path
+
+ so_files = list(Path(__file__).parent.glob("_C*.so"))
+ assert (
+ len(so_files) == 1
+ ), f"Expected one _C*.so file, found {len(so_files)}"
+ torch.ops.load_library(so_files[0])
+
+ from . import ops
+
+
+Adding training (autograd) support for an operator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Use ``torch.library.register_autograd`` to add training support for an operator. Prefer
+this over directly using Python ``torch.autograd.Function`` or C++ ``torch::autograd::Function``;
+you must use those in a very specific way to avoid silent incorrectness (see
+`The Custom Operators Manual `_
+for more details).
+
+.. code-block:: python
+
+ def _backward(ctx, grad):
+ a, b = ctx.saved_tensors
+ grad_a, grad_b = None, None
+ if ctx.needs_input_grad[0]:
+ grad_a = grad * b
+ if ctx.needs_input_grad[1]:
+ grad_b = grad * a
+ return grad_a, grad_b, None
+
+ def _setup_context(ctx, inputs, output):
+ a, b, c = inputs
+ saved_a, saved_b = None, None
+ if ctx.needs_input_grad[0]:
+ saved_b = b
+ if ctx.needs_input_grad[1]:
+ saved_a = a
+ ctx.save_for_backward(saved_a, saved_b)
+
+ # This code adds training support for the operator. You must provide us
+ # the backward formula for the operator and a `setup_context` function
+ # to save values to be used in the backward.
+ torch.library.register_autograd(
+ "extension_cpp::mymuladd", _backward, setup_context=_setup_context)
+
+Note that the backward must be a composition of PyTorch-understood operators.
+If you wish to use another custom C++ or CUDA kernel in your backwards pass,
+it must be wrapped into a custom operator.
+
+If we had our own custom ``mymul`` kernel, we would need to wrap it into a
+custom operator and then call that from the backward:
+
+.. code-block:: cpp
+
+ // New! a mymul_cpu kernel
+ at::Tensor mymul_cpu(const at::Tensor& a, const at::Tensor& b) {
+ TORCH_CHECK(a.sizes() == b.sizes());
+ TORCH_CHECK(a.dtype() == at::kFloat);
+ TORCH_CHECK(b.dtype() == at::kFloat);
+ TORCH_CHECK(a.device().type() == at::DeviceType::CPU);
+ TORCH_CHECK(b.device().type() == at::DeviceType::CPU);
+ at::Tensor a_contig = a.contiguous();
+ at::Tensor b_contig = b.contiguous();
+ at::Tensor result = torch::empty(a_contig.sizes(), a_contig.options());
+ const float* a_ptr = a_contig.data_ptr<float>();
+ const float* b_ptr = b_contig.data_ptr<float>();
+ float* result_ptr = result.data_ptr<float>();
+ for (int64_t i = 0; i < result.numel(); i++) {
+ result_ptr[i] = a_ptr[i] * b_ptr[i];
+ }
+ return result;
+ }
+
+ TORCH_LIBRARY(extension_cpp, m) {
+ m.def("mymuladd(Tensor a, Tensor b, float c) -> Tensor");
+ // New! defining the mymul operator
+ m.def("mymul(Tensor a, Tensor b) -> Tensor");
+ }
+
+
+ TORCH_LIBRARY_IMPL(extension_cpp, CPU, m) {
+ m.impl("mymuladd", &mymuladd_cpu);
+ // New! registering the cpu kernel for the mymul operator
+ m.impl("mymul", &mymul_cpu);
+ }
+
+.. code-block:: python
+
+ def _backward(ctx, grad):
+ a, b = ctx.saved_tensors
+ grad_a, grad_b = None, None
+ if ctx.needs_input_grad[0]:
+ grad_a = torch.ops.extension_cpp.mymul.default(grad, b)
+ if ctx.needs_input_grad[1]:
+ grad_b = torch.ops.extension_cpp.mymul.default(grad, a)
+ return grad_a, grad_b, None
+
+
+ def _setup_context(ctx, inputs, output):
+ a, b, c = inputs
+ saved_a, saved_b = None, None
+ if ctx.needs_input_grad[0]:
+ saved_b = b
+ if ctx.needs_input_grad[1]:
+ saved_a = a
+ ctx.save_for_backward(saved_a, saved_b)
+
+
+ # This code adds training support for the operator. You must provide us
+ # the backward formula for the operator and a `setup_context` function
+ # to save values to be used in the backward.
+ torch.library.register_autograd(
+ "extension_cpp::mymuladd", _backward, setup_context=_setup_context)
+
+Testing an operator
+-------------------
+Use ``torch.library.opcheck`` to test that the custom op was registered correctly.
+Note that this function does not test that the gradients are mathematically correct
+-- plan to write separate tests for that, either manual ones or by using
+``torch.autograd.gradcheck``.
+
+.. code-block:: python
+
+ def sample_inputs(device, *, requires_grad=False):
+ def make_tensor(*size):
+ return torch.randn(size, device=device, requires_grad=requires_grad)
+
+ def make_nondiff_tensor(*size):
+ return torch.randn(size, device=device, requires_grad=False)
+
+ return [
+ [make_tensor(3), make_tensor(3), 1],
+ [make_tensor(20), make_tensor(20), 3.14],
+ [make_tensor(20), make_nondiff_tensor(20), -123],
+ [make_nondiff_tensor(2, 3), make_tensor(2, 3), -0.3],
+ ]
+
+ def reference_muladd(a, b, c):
+ return a * b + c
+
+ samples = sample_inputs(device, requires_grad=True)
+ samples.extend(sample_inputs(device, requires_grad=False))
+ for args in samples:
+ # Correctness test
+ result = torch.ops.extension_cpp.mymuladd(*args)
+ expected = reference_muladd(*args)
+ torch.testing.assert_close(result, expected)
+
+ # Use opcheck to check for incorrect usage of operator registration APIs
+ torch.library.opcheck(torch.ops.extension_cpp.mymuladd.default, args)
+
+.. _mutable-ops:
+
+Creating mutable operators
+--------------------------
+You may wish to author a custom operator that mutates its inputs. Use ``Tensor(a!)``
+to specify each mutable Tensor in the schema; otherwise, there will be undefined
+behavior. If there are multiple mutated Tensors, use different names (for example, ``Tensor(a!)``,
+``Tensor(b!)``, ``Tensor(c!)``) for each mutable Tensor.
+
+Let's author a ``myadd_out(a, b, out)`` operator, which writes the contents of ``a+b`` into ``out``.
+
+.. code-block:: cpp
+
+ // An example of an operator that mutates one of its inputs.
+ void myadd_out_cpu(const at::Tensor& a, const at::Tensor& b, at::Tensor& out) {
+ TORCH_CHECK(a.sizes() == b.sizes());
+ TORCH_CHECK(b.sizes() == out.sizes());
+ TORCH_CHECK(a.dtype() == at::kFloat);
+ TORCH_CHECK(b.dtype() == at::kFloat);
+ TORCH_CHECK(out.dtype() == at::kFloat);
+ TORCH_CHECK(out.is_contiguous());
+ TORCH_INTERNAL_ASSERT(a.device().type() == at::DeviceType::CPU);
+ TORCH_INTERNAL_ASSERT(b.device().type() == at::DeviceType::CPU);
+ TORCH_INTERNAL_ASSERT(out.device().type() == at::DeviceType::CPU);
+ at::Tensor a_contig = a.contiguous();
+ at::Tensor b_contig = b.contiguous();
+ const float* a_ptr = a_contig.data_ptr<float>();
+ const float* b_ptr = b_contig.data_ptr<float>();
+ float* result_ptr = out.data_ptr<float>();
+ for (int64_t i = 0; i < out.numel(); i++) {
+ result_ptr[i] = a_ptr[i] + b_ptr[i];
+ }
+ }
+
+When defining the operator, we must specify that it mutates the out Tensor in the schema:
+
+.. code-block:: cpp
+
+ TORCH_LIBRARY(extension_cpp, m) {
+ m.def("mymuladd(Tensor a, Tensor b, float c) -> Tensor");
+ m.def("mymul(Tensor a, Tensor b) -> Tensor");
+ // New!
+ m.def("myadd_out(Tensor a, Tensor b, Tensor(a!) out) -> ()");
+ }
+
+ TORCH_LIBRARY_IMPL(extension_cpp, CPU, m) {
+ m.impl("mymuladd", &mymuladd_cpu);
+ m.impl("mymul", &mymul_cpu);
+ // New!
+ m.impl("myadd_out", &myadd_out_cpu);
+ }
+
+.. note::
+
+ Do not return any mutated Tensors as outputs of the operator as this will
+ cause incompatibility with PyTorch subsystems like ``torch.compile``.
+
+Conclusion
+----------
+In this tutorial, we went over the recommended approach to integrating Custom C++
+and CUDA operators with PyTorch. The ``TORCH_LIBRARY/torch.library`` APIs are fairly
+low-level. For more information about how to use the API, see
+`The Custom Operators Manual `_.
diff --git a/advanced_source/cpp_custom_ops_sycl.rst b/advanced_source/cpp_custom_ops_sycl.rst
new file mode 100644
index 00000000000..3b3ad069b58
--- /dev/null
+++ b/advanced_source/cpp_custom_ops_sycl.rst
@@ -0,0 +1,274 @@
+.. _cpp-custom-ops-tutorial-sycl:
+
+Custom SYCL Operators
+=====================
+
+.. grid:: 2
+
+ .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn
+ :class-card: card-prerequisites
+
+ * How to integrate custom operators written in SYCL with PyTorch
+
+ .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites
+ :class-card: card-prerequisites
+
+ * PyTorch 2.8 or later
+ * Basic understanding of SYCL programming
+
+.. note::
+
+ ``SYCL`` serves as the backend programming language for Intel GPUs (device label ``xpu``). For configuration details, see:
+ `Getting Started on Intel GPUs `_. The Intel Compiler, which comes bundled with Intel Deep Learning Essentials, handles ``SYCL`` compilation. Ensure you install and activate the compiler environment prior to executing the code examples in this tutorial.
+
+PyTorch offers a large library of operators that work on Tensors (e.g. torch.add, torch.sum, etc).
+However, you may wish to bring a new custom operator to PyTorch. This tutorial demonstrates the
+best path to authoring a custom operator written in SYCL. Tutorials for C++ and CUDA operators are available in the :ref:`cpp-custom-ops-tutorial`.
+
+Follow the structure to create a custom SYCL operator:
+
+.. code-block:: text
+
+ sycl_example/
+ ├── setup.py
+ ├── sycl_extension
+ │ ├── __init__.py
+ │ ├── muladd.sycl
+ │ └── ops.py
+ └── test_sycl_extension.py
+
+Setting up the Build System
+---------------------------
+
+If you need to compile **SYCL** code (for example, ``.sycl`` files), use `torch.utils.cpp_extension.SyclExtension `_.
+The setup process is very similar to C++/CUDA, except the compilation arguments need to be adjusted for SYCL.
+
+Using ``sycl_extension`` is as straightforward as writing the following ``setup.py``:
+
+.. code-block:: python
+
+ import os
+ import torch
+ import glob
+ from setuptools import find_packages, setup
+ from torch.utils.cpp_extension import SyclExtension, BuildExtension
+
+ library_name = "sycl_extension"
+ py_limited_api = True
+ extra_compile_args = {
+ "cxx": ["-O3",
+ "-fdiagnostics-color=always",
+ "-DPy_LIMITED_API=0x03090000"],
+ "sycl": ["-O3" ]
+ }
+
+ assert(torch.xpu.is_available()), "XPU is not available, please check your environment"
+ # Source files collection
+ this_dir = os.path.dirname(os.path.curdir)
+ extensions_dir = os.path.join(this_dir, library_name)
+ sources = list(glob.glob(os.path.join(extensions_dir, "*.sycl")))
+ # Construct extension
+ ext_modules = [
+ SyclExtension(
+ f"{library_name}._C",
+ sources,
+ extra_compile_args=extra_compile_args,
+ py_limited_api=py_limited_api,
+ )
+ ]
+ setup(
+ name=library_name,
+ packages=find_packages(),
+ ext_modules=ext_modules,
+ install_requires=["torch"],
+ description="Simple Example of PyTorch Sycl extensions",
+ cmdclass={"build_ext": BuildExtension},
+ options={"bdist_wheel": {"py_limited_api": "cp39"}} if py_limited_api else {},
+ )
+
+
+Defining the custom op and adding backend implementations
+---------------------------------------------------------
+First, let's write a SYCL function that computes ``mymuladd``:
+
+In order to use this from PyTorch’s Python frontend, we need to register it
+as a PyTorch operator using the ``TORCH_LIBRARY`` API. This will automatically
+bind the operator to Python.
+
+
+If you also have a SYCL implementation of ``mymuladd``, you can also register it
+in a separate ``TORCH_LIBRARY_IMPL`` block:
+
+.. code-block:: cpp
+
+ #include <c10/xpu/XPUStream.h>
+ #include <sycl/sycl.hpp>
+ #include <ATen/Operators.h>
+ #include <torch/all.h>
+ #include <torch/library.h>
+
+ namespace sycl_extension {
+ // MulAdd Kernel: result = a * b + c
+ static void muladd_kernel(
+ int numel, const float* a, const float* b, float c, float* result,
+ const sycl::nd_item<1>& item) {
+ int idx = item.get_global_id(0);
+ if (idx < numel) {
+ result[idx] = a[idx] * b[idx] + c;
+ }
+ }
+
+ class MulAddKernelFunctor {
+ public:
+ MulAddKernelFunctor(int _numel, const float* _a, const float* _b, float _c, float* _result)
+ : numel(_numel), a(_a), b(_b), c(_c), result(_result) {}
+ void operator()(const sycl::nd_item<1>& item) const {
+ muladd_kernel(numel, a, b, c, result, item);
+ }
+
+ private:
+ int numel;
+ const float* a;
+ const float* b;
+ float c;
+ float* result;
+ };
+
+ at::Tensor mymuladd_xpu(const at::Tensor& a, const at::Tensor& b, double c) {
+ TORCH_CHECK(a.sizes() == b.sizes(), "a and b must have the same shape");
+ TORCH_CHECK(a.dtype() == at::kFloat, "a must be a float tensor");
+ TORCH_CHECK(b.dtype() == at::kFloat, "b must be a float tensor");
+ TORCH_CHECK(a.device().is_xpu(), "a must be an XPU tensor");
+ TORCH_CHECK(b.device().is_xpu(), "b must be an XPU tensor");
+
+ at::Tensor a_contig = a.contiguous();
+ at::Tensor b_contig = b.contiguous();
+ at::Tensor result = at::empty_like(a_contig);
+
+ const float* a_ptr = a_contig.data_ptr<float>();
+ const float* b_ptr = b_contig.data_ptr<float>();
+ float* res_ptr = result.data_ptr<float>();
+ int numel = a_contig.numel();
+
+ sycl::queue& queue = c10::xpu::getCurrentXPUStream().queue();
+ constexpr int threads = 256;
+ int blocks = (numel + threads - 1) / threads;
+
+ queue.submit([&](sycl::handler& cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<1>(blocks * threads, threads),
+ MulAddKernelFunctor(numel, a_ptr, b_ptr, static_cast<float>(c), res_ptr)
+ );
+ });
+
+ return result;
+ }
+ // Defines the operators
+ TORCH_LIBRARY(sycl_extension, m) {
+ m.def("mymuladd(Tensor a, Tensor b, float c) -> Tensor");
+ }
+
+ // ==================================================
+ // Register SYCL Implementations to Torch Library
+ // ==================================================
+ TORCH_LIBRARY_IMPL(sycl_extension, XPU, m) {
+ m.impl("mymuladd", &mymuladd_xpu);
+ }
+
+ } // namespace sycl_extension
+
+
+
+Create a Python Interface
+-------------------------
+
+Create a Python interface for our operator in the ``sycl_extension/ops.py`` file:
+
+.. code-block:: python
+
+ import torch
+ from torch import Tensor
+ __all__ = ["mymuladd"]
+
+ def mymuladd(a: Tensor, b: Tensor, c: float) -> Tensor:
+ """Performs a * b + c in an efficient fused kernel"""
+ return torch.ops.sycl_extension.mymuladd.default(a, b, c)
+
+Initialize Package
+------------------
+
+Create ``sycl_extension/__init__.py`` file to make the package importable:
+
+.. code-block:: python
+
+ import ctypes
+ from pathlib import Path
+
+ import torch
+
+ current_dir = Path(__file__).parent.parent
+ build_dir = current_dir / "build"
+ so_files = list(build_dir.glob("**/*.so"))
+
+ assert len(so_files) == 1, f"Expected one _C*.so file, found {len(so_files)}"
+
+ with torch._ops.dl_open_guard():
+ loaded_lib = ctypes.CDLL(so_files[0])
+
+ from . import ops
+
+ __all__ = [
+ "loaded_lib",
+ "ops",
+ ]
+
+Testing SYCL extension operator
+-------------------
+
+Use simple test to verify that the operator works correctly.
+
+.. code-block:: python
+
+ import torch
+ from torch.testing._internal.common_utils import TestCase
+ import unittest
+ import sycl_extension
+
+ def reference_muladd(a, b, c):
+ return a * b + c
+
+ class TestMyMulAdd(TestCase):
+ def sample_inputs(self, device, *, requires_grad=False):
+ def make_tensor(*size):
+ return torch.randn(size, device=device, requires_grad=requires_grad)
+
+ def make_nondiff_tensor(*size):
+ return torch.randn(size, device=device, requires_grad=False)
+
+ return [
+ [make_tensor(3), make_tensor(3), 1],
+ [make_tensor(20), make_tensor(20), 3.14],
+ [make_tensor(20), make_nondiff_tensor(20), -123],
+ [make_nondiff_tensor(2, 3), make_tensor(2, 3), -0.3],
+ ]
+
+ def _test_correctness(self, device):
+ samples = self.sample_inputs(device)
+ for args in samples:
+ result = sycl_extension.ops.mymuladd(*args)
+ expected = reference_muladd(*args)
+ torch.testing.assert_close(result, expected)
+
+ @unittest.skipIf(not torch.xpu.is_available(), "requires Intel GPU")
+ def test_correctness_xpu(self):
+ self._test_correctness("xpu")
+
+ if __name__ == "__main__":
+ unittest.main()
+
+This test checks the correctness of the custom operator by comparing its output against a reference implementation.
+
+Conclusion
+----------
+
+In this tutorial, we demonstrated how to implement and compile custom SYCL operators for PyTorch. We specifically showcased an inference operation ``muladd``. For adding backward support or enabling torch.compile compatibility, please refer to :ref:`cpp-custom-ops-tutorial`.
diff --git a/advanced_source/cpp_export.rst b/advanced_source/cpp_export.rst
index e0081377584..56c4bcbaae7 100644
--- a/advanced_source/cpp_export.rst
+++ b/advanced_source/cpp_export.rst
@@ -1,376 +1,3 @@
-Loading a PyTorch Model in C++
-==============================
-
-As its name suggests, the primary interface to PyTorch is the Python
-programming language. While Python is a suitable and preferred language for
-many scenarios requiring dynamism and ease of iteration, there are equally many
-situations where precisely these properties of Python are unfavorable. One
-environment in which the latter often applies is *production* -- the land of
-low latencies and strict deployment requirements. For production scenarios, C++
-is very often the language of choice, even if only to bind it into another
-language like Java, Rust or Go. The following paragraphs will outline the path
-PyTorch provides to go from an existing Python model to a serialized
-representation that can be *loaded* and *executed* purely from C++, with no
-dependency on Python.
-
-Step 1: Converting Your PyTorch Model to Torch Script
------------------------------------------------------
-
-A PyTorch model's journey from Python to C++ is enabled by `Torch Script
-`_, a representation of a PyTorch
-model that can be understood, compiled and serialized by the Torch Script
-compiler. If you are starting out from an existing PyTorch model written in the
-vanilla "eager" API, you must first convert your model to Torch Script. In the
-most common cases, discussed below, this requires only little effort. If you
-already have a Torch Script module, you can skip to the next section of this
-tutorial.
-
-There exist two ways of converting a PyTorch model to Torch Script. The first
-is known as *tracing*, a mechanism in which the structure of the model is
-captured by evaluating it once using example inputs, and recording the flow of
-those inputs through the model. This is suitable for models that make limited
-use of control flow. The second approach is to add explicit annotations to your
-model that inform the Torch Script compiler that it may directly parse and
-compile your model code, subject to the constraints imposed by the Torch Script
-language.
-
-.. tip::
-
- You can find the complete documentation for both of these methods, as well as
- further guidance on which to use, in the official `Torch Script
- reference `_.
-
-Converting to Torch Script via Tracing
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To convert a PyTorch model to Torch Script via tracing, you must pass an
-instance of your model along with an example input to the ``torch.jit.trace``
-function. This will produce a ``torch.jit.ScriptModule`` object with the trace
-of your model evaluation embedded in the module's ``forward`` method::
-
- import torch
- import torchvision
-
- # An instance of your model.
- model = torchvision.models.resnet18()
-
- # An example input you would normally provide to your model's forward() method.
- example = torch.rand(1, 3, 224, 224)
-
- # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
- traced_script_module = torch.jit.trace(model, example)
-
-The traced ``ScriptModule`` can now be evaluated identically to a regular
-PyTorch module::
-
- In[1]: output = traced_script_module(torch.ones(1, 3, 224, 224))
- In[2]: output[0, :5]
- Out[2]: tensor([-0.2698, -0.0381, 0.4023, -0.3010, -0.0448], grad_fn=)
-
-Converting to Torch Script via Annotation
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Under certain circumstances, such as if your model employs particular forms of
-control flow, you may want to write your model in Torch Script directly and
-annotate your model accordingly. For example, say you have the following
-vanilla Pytorch model::
-
- import torch
-
- class MyModule(torch.nn.Module):
- def __init__(self, N, M):
- super(MyModule, self).__init__()
- self.weight = torch.nn.Parameter(torch.rand(N, M))
-
- def forward(self, input):
- if input.sum() > 0:
- output = self.weight.mv(input)
- else:
- output = self.weight + input
- return output
-
-
-Because the ``forward`` method of this module uses control flow that is
-dependent on the input, it is not suitable for tracing. Instead, we can convert
-it to a ``ScriptModule`` by subclassing it from ``torch.jit.ScriptModule`` and
-adding a ``@torch.jit.script_method`` annotation to the model's ``forward``
-method::
-
- import torch
-
- class MyModule(torch.jit.ScriptModule):
- def __init__(self, N, M):
- super(MyModule, self).__init__()
- self.weight = torch.nn.Parameter(torch.rand(N, M))
-
- @torch.jit.script_method
- def forward(self, input):
- if bool(input.sum() > 0):
- output = self.weight.mv(input)
- else:
- output = self.weight + input
- return output
-
- my_script_module = MyModule(2, 3)
-
-Creating a new ``MyModule`` object now directly produces an instance of
-``ScriptModule`` that is ready for serialization.
-
-Step 2: Serializing Your Script Module to a File
--------------------------------------------------
-
-Once you have a ``ScriptModule`` in your hands, either from tracing or
-annotating a PyTorch model, you are ready to serialize it to a file. Later on,
-you'll be able to load the module from this file in C++ and execute it without
-any dependency on Python. Say we want to serialize the ``ResNet18`` model shown
-earlier in the tracing example. To perform this serialization, simply call
-`save `_
-on the module and pass it a filename::
-
- traced_script_module.save("model.pt")
-
-This will produce a ``model.pt`` file in your working directory. We have now
-officially left the realm of Python and are ready to cross over to the sphere
-of C++.
-
-Step 3: Loading Your Script Module in C++
-------------------------------------------
-
-To load your serialized PyTorch model in C++, your application must depend on
-the PyTorch C++ API -- also known as *LibTorch*. The LibTorch distribution
-encompasses a collection of shared libraries, header files and CMake build
-configuration files. While CMake is not a requirement for depending on
-LibTorch, it is the recommended approach and will be well supported into the
-future. For this tutorial, we will be building a minimal C++ application using
-CMake and LibTorch that simply loads and executes a serialized PyTorch model.
-
-A Minimal C++ Application
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Let's begin by discussing the code to load a module. The following will already
-do:
-
-.. code-block:: cpp
-
- #include // One-stop header.
-
- #include
- #include
-
- int main(int argc, const char* argv[]) {
- if (argc != 2) {
- std::cerr << "usage: example-app \n";
- return -1;
- }
-
- // Deserialize the ScriptModule from a file using torch::jit::load().
- std::shared_ptr module = torch::jit::load(argv[1]);
-
- assert(module != nullptr);
- std::cout << "ok\n";
- }
-
-The ```` header encompasses all relevant includes from the
-LibTorch library necessary to run the example. Our application accepts the file
-path to a serialized PyTorch ``ScriptModule`` as its only command line argument
-and then proceeds to deserialize the module using the ``torch::jit::load()``
-function, which takes this file path as input. In return we receive a shared
-pointer to a ``torch::jit::script::Module``, the equivalent to a
-``torch.jit.ScriptModule`` in C++. For now, we only verify that this pointer is
-not null. We will examine how to execute it in a moment.
-
-Depending on LibTorch and Building the Application
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Assume we stored the above code into a file called ``example-app.cpp``. A
-minimal ``CMakeLists.txt`` to build it could look as simple as:
-
-.. code-block:: cmake
-
- cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
- project(custom_ops)
-
- find_package(Torch REQUIRED)
-
- add_executable(example-app example-app.cpp)
- target_link_libraries(example-app "${TORCH_LIBRARIES}")
- set_property(TARGET example-app PROPERTY CXX_STANDARD 11)
-
-The last thing we need to build the example application is the LibTorch
-distribution. You can always grab the latest stable release from the `download
-page `_ on the PyTorch website. If you download and unzip
-the latest archive, you should receive a folder with the following directory
-structure:
-
-.. code-block:: sh
-
- libtorch/
- bin/
- include/
- lib/
- share/
-
-- The ``lib/`` folder contains the shared libraries you must link against,
-- The ``include/`` folder contains header files your program will need to include,
-- The ``share/`` folder contains the necessary CMake configuration to enable the simple ``find_package(Torch)`` command above.
-
-.. tip::
- On Windows, debug and release builds are not ABI-compatible. If you plan to
- build your project in debug mode, please try the debug version of LibTorch.
-
-The last step is building the application. For this, assume our example
-directory is laid out like this:
-
-.. code-block:: sh
-
- example-app/
- CMakeLists.txt
- example-app.cpp
-
-We can now run the following commands to build the application from within the
-``example-app/`` folder:
-
-.. code-block:: sh
-
- mkdir build
- cd build
- cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
- make
-
-where ``/path/to/libtorch`` should be the full path to the unzipped LibTorch
-distribution. If all goes well, it will look something like this:
-
-.. code-block:: sh
-
- root@4b5a67132e81:/example-app# mkdir build
- root@4b5a67132e81:/example-app# cd build
- root@4b5a67132e81:/example-app/build# cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
- -- The C compiler identification is GNU 5.4.0
- -- The CXX compiler identification is GNU 5.4.0
- -- Check for working C compiler: /usr/bin/cc
- -- Check for working C compiler: /usr/bin/cc -- works
- -- Detecting C compiler ABI info
- -- Detecting C compiler ABI info - done
- -- Detecting C compile features
- -- Detecting C compile features - done
- -- Check for working CXX compiler: /usr/bin/c++
- -- Check for working CXX compiler: /usr/bin/c++ -- works
- -- Detecting CXX compiler ABI info
- -- Detecting CXX compiler ABI info - done
- -- Detecting CXX compile features
- -- Detecting CXX compile features - done
- -- Looking for pthread.h
- -- Looking for pthread.h - found
- -- Looking for pthread_create
- -- Looking for pthread_create - not found
- -- Looking for pthread_create in pthreads
- -- Looking for pthread_create in pthreads - not found
- -- Looking for pthread_create in pthread
- -- Looking for pthread_create in pthread - found
- -- Found Threads: TRUE
- -- Configuring done
- -- Generating done
- -- Build files have been written to: /example-app/build
- root@4b5a67132e81:/example-app/build# make
- Scanning dependencies of target example-app
- [ 50%] Building CXX object CMakeFiles/example-app.dir/example-app.cpp.o
- [100%] Linking CXX executable example-app
- [100%] Built target example-app
-
-If we supply the path to the serialized ``ResNet18`` model we created earlier
-to the resulting ``example-app`` binary, we should be rewarded with a friendly
-"ok":
-
-.. code-block:: sh
-
- root@4b5a67132e81:/example-app/build# ./example-app model.pt
- ok
-
-Step 4: Executing the Script Module in C++
-------------------------------------------
-
-Having successfully loaded our serialized ``ResNet18`` in C++, we are now just a
-couple lines of code away from executing it! Let's add those lines to our C++
-application's ``main()`` function:
-
-.. code-block:: cpp
-
- // Create a vector of inputs.
- std::vector inputs;
- inputs.push_back(torch::ones({1, 3, 224, 224}));
-
- // Execute the model and turn its output into a tensor.
- at::Tensor output = module->forward(inputs).toTensor();
-
- std::cout << output.slice(/*dim=*/1, /*start=*/0, /*end=*/5) << '\n';
-
-The first two lines set up the inputs to our model. We create a vector of
-``torch::jit::IValue`` (a type-erased value type ``script::Module`` methods
-accept and return) and add a single input. To create the input tensor, we use
-``torch::ones()``, the equivalent to ``torch.ones`` in the C++ API. We then
-run the ``script::Module``'s ``forward`` method, passing it the input vector we
-created. In return we get a new ``IValue``, which we convert to a tensor by
-calling ``toTensor()``.
-
-.. tip::
-
- To learn more about functions like ``torch::ones`` and the PyTorch C++ API in
- general, refer to its documentation at https://pytorch.org/cppdocs. The
- PyTorch C++ API provides near feature parity with the Python API, allowing
- you to further manipulate and process tensors just like in Python.
-
-In the last line, we print the first five entries of the output. Since we
-supplied the same input to our model in Python earlier in this tutorial, we
-should ideally see the same output. Let's try it out by re-compiling our
-application and running it with the same serialized model:
-
-.. code-block:: sh
-
- root@4b5a67132e81:/example-app/build# make
- Scanning dependencies of target example-app
- [ 50%] Building CXX object CMakeFiles/example-app.dir/example-app.cpp.o
- [100%] Linking CXX executable example-app
- [100%] Built target example-app
- root@4b5a67132e81:/example-app/build# ./example-app model.pt
- -0.2698 -0.0381 0.4023 -0.3010 -0.0448
- [ Variable[CPUFloatType]{1,5} ]
-
-
-For reference, the output in Python previously was::
-
- tensor([-0.2698, -0.0381, 0.4023, -0.3010, -0.0448], grad_fn=)
-
-Looks like a good match!
-
-.. tip::
-
- To move your model to GPU memory, you can write ``model->to(at::kCUDA);``.
- Make sure the inputs to a model living in CUDA memory are also in CUDA memory
- by calling ``tensor.to(at::kCUDA)``, which will return a new tensor in CUDA
- memory.
-
-Step 5: Getting Help and Exploring the API
-------------------------------------------
-
-This tutorial has hopefully equipped you with a general understanding of a
-PyTorch model's path from Python to C++. With the concepts described in this
-tutorial, you should be able to go from a vanilla, "eager" PyTorch model, to a
-compiled ``ScriptModule`` in Python, to a serialized file on disk and -- to
-close the loop -- to an executable ``script::Module`` in C++.
-
-Of course, there are many concepts we did not cover. For example, you may find
-yourself wanting to extend your ``ScriptModule`` with a custom operator
-implemented in C++ or CUDA, and executing this custom operator inside your
-``ScriptModule`` loaded in your pure C++ production environment. The good news
-is: this is possible, and well supported! For now, you can explore `this
-`_ folder
-for examples, and we will follow up with a tutorial shortly. In the time being,
-the following links may be generally helpful:
-
-- The Torch Script reference: https://pytorch.org/docs/master/jit.html
-- The PyTorch C++ API documentation: https://pytorch.org/cppdocs/
-- The PyTorch Python API documentation: https://pytorch.org/docs/
-
-As always, if you run into any problems or have questions, you can use our
-`forum `_ or `GitHub issues
-`_ to get in touch.
+.. warning::
+ TorchScript is deprecated, please use
+ `torch.export <https://pytorch.org/docs/stable/export.html>`__ instead.
\ No newline at end of file
diff --git a/advanced_source/cpp_extension.rst b/advanced_source/cpp_extension.rst
deleted file mode 100644
index 383d14851d1..00000000000
--- a/advanced_source/cpp_extension.rst
+++ /dev/null
@@ -1,1182 +0,0 @@
-Custom C++ and CUDA Extensions
-==============================
-**Author**: `Peter Goldsborough `_
-
-
-PyTorch provides a plethora of operations related to neural networks, arbitrary
-tensor algebra, data wrangling and other purposes. However, you may still find
-yourself in need of a more customized operation. For example, you might want to
-use a novel activation function you found in a paper, or implement an operation
-you developed as part of your research.
-
-The easiest way of integrating such a custom operation in PyTorch is to write it
-in Python by extending :class:`Function` and :class:`Module` as outlined `here
-`_. This gives you the full
-power of automatic differentiation (spares you from writing derivative
-functions) as well as the usual expressiveness of Python. However, there may be
-times when your operation is better implemented in C++. For example, your code
-may need to be *really* fast because it is called very frequently in your model
-or is very expensive even for few calls. Another plausible reason is that it
-depends on or interacts with other C or C++ libraries. To address such cases,
-PyTorch provides a very easy way of writing custom *C++ extensions*.
-
-C++ extensions are a mechanism we have developed to allow users (you) to create
-PyTorch operators defined *out-of-source*, i.e. separate from the PyTorch
-backend. This approach is *different* from the way native PyTorch operations are
-implemented. C++ extensions are intended to spare you much of the boilerplate
-associated with integrating an operation with PyTorch's backend while providing
-you with a high degree of flexibility for your PyTorch-based projects.
-Nevertheless, once you have defined your operation as a C++ extension, turning
-it into a native PyTorch function is largely a matter of code organization,
-which you can tackle after the fact if you decide to contribute your operation
-upstream.
-
-Motivation and Example
-----------------------
-
-The rest of this note will walk through a practical example of writing and using
-a C++ (and CUDA) extension. If you are being chased or someone will fire you if
-you don't get that op done by the end of the day, you can skip this section and
-head straight to the implementation details in the next section.
-
-Let's say you've come up with a new kind of recurrent unit that you found to
-have superior properties compared to the state of the art. This recurrent unit
-is similar to an LSTM, but differs in that it lacks a *forget gate* and uses an
-*Exponential Linear Unit* (ELU) as its internal activation function. Because
-this unit never forgets, we'll call it *LLTM*, or *Long-Long-Term-Memory* unit.
-
-The two ways in which LLTMs differ from vanilla LSTMs are significant enough
-that we can't configure PyTorch's ``LSTMCell`` for our purposes, so we'll have to
-create a custom cell. The first and easiest approach for this -- and likely in
-all cases a good first step -- is to implement our desired functionality in
-plain PyTorch with Python. For this, we need to subclass
-:class:`torch.nn.Module` and implement the forward pass of the LLTM. This would
-look something like this::
-
- class LLTM(torch.nn.Module):
- def __init__(self, input_features, state_size):
- super(LLTM, self).__init__()
- self.input_features = input_features
- self.state_size = state_size
- # 3 * state_size for input gate, output gate and candidate cell gate.
- # input_features + state_size because we will multiply with [input, h].
- self.weights = torch.nn.Parameter(
- torch.empty(3 * state_size, input_features + state_size))
- self.bias = torch.nn.Parameter(torch.empty(3 * state_size))
- self.reset_parameters()
-
- def reset_parameters(self):
- stdv = 1.0 / math.sqrt(self.state_size)
- for weight in self.parameters():
- weight.data.uniform_(-stdv, +stdv)
-
- def forward(self, input, state):
- old_h, old_cell = state
- X = torch.cat([old_h, input], dim=1)
-
- # Compute the input, output and candidate cell gates with one MM.
- gate_weights = F.linear(X, self.weights, self.bias)
- # Split the combined gate weight matrix into its components.
- gates = gate_weights.chunk(3, dim=1)
-
- input_gate = torch.sigmoid(gates[0])
- output_gate = torch.sigmoid(gates[1])
- # Here we use an ELU instead of the usual tanh.
- candidate_cell = F.elu(gates[2])
-
- # Compute the new cell state.
- new_cell = old_cell + candidate_cell * input_gate
- # Compute the new hidden state and output.
- new_h = torch.tanh(new_cell) * output_gate
-
- return new_h, new_cell
-
-which we could then use as expected::
-
- import torch
-
- X = torch.randn(batch_size, input_features)
- h = torch.randn(batch_size, state_size)
- C = torch.randn(batch_size, state_size)
-
- rnn = LLTM(input_features, state_size)
-
- new_h, new_C = rnn(X, (h, C))
-
-Naturally, if at all possible and plausible, you should use this approach to
-extend PyTorch. Since PyTorch has highly optimized implementations of its
-operations for CPU *and* GPU, powered by libraries such as `NVIDIA cuDNN
-`_, `Intel MKL
-`_ or `NNPACK
-`_, PyTorch code like above will often be
-fast enough. However, we can also see why, under certain circumstances, there is
-room for further performance improvements. The most obvious reason is that
-PyTorch has no knowledge of the *algorithm* you are implementing. It knows only
-of the individual operations you use to compose your algorithm. As such, PyTorch
-must execute your operations individually, one after the other. Since each
-individual call to the implementation (or *kernel*) of an operation, which may
-involve launch of a CUDA kernel, has a certain amount of overhead, this overhead
-may become significant across many function calls. Furthermore, the Python
-interpreter that is running our code can itself slow down our program.
-
-A definite method of speeding things up is therefore to rewrite parts in C++ (or
-CUDA) and *fuse* particular groups of operations. Fusing means combining the
-implementations of many functions into a single functions, which profits from
-fewer kernel launches as well as other optimizations we can perform with
-increased visibility of the global flow of data.
-
-Let's see how we can use C++ extensions to implement a *fused* version of the
-LLTM. We'll begin by writing it in plain C++, using the `ATen
-`_ library that powers much of PyTorch's
-backend, and see how easily it lets us translate our Python code. We'll then
-speed things up even more by moving parts of the model to CUDA kernel to benefit
-from the massive parallelism GPUs provide.
-
-Writing a C++ Extension
------------------------
-
-C++ extensions come in two flavors: They can be built "ahead of time" with
-:mod:`setuptools`, or "just in time" via
-:func:`torch.utils.cpp_extension.load`. We'll begin with the first approach and
-discuss the latter later.
-
-Building with :mod:`setuptools`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-For the "ahead of time" flavor, we build our C++ extension by writing a
-``setup.py`` script that uses setuptools to compile our C++ code. For the LLTM, it
-looks as simple as this::
-
- from setuptools import setup
- from torch.utils.cpp_extension import CppExtension, BuildExtension
-
- setup(name='lltm_cpp',
- ext_modules=[CppExtension('lltm', ['lltm.cpp'])],
- cmdclass={'build_ext': BuildExtension})
-
-
-In this code, :class:`CppExtension` is a convenience wrapper around
-:class:`setuptools.Extension` that passes the correct include paths and sets
-the language of the extension to C++. The equivalent vanilla :mod:`setuptools`
-code would simply be::
-
- setuptools.Extension(
- name='lltm_cpp',
- sources=['lltm.cpp'],
- include_dirs=torch.utils.cpp_extension.include_paths(),
- language='c++')
-
-:class:`BuildExtension` performs a number of required configuration steps and
-checks and also manages mixed compilation in the case of mixed C++/CUDA
-extensions. And that's all we really need to know about building C++ extensions
-for now! Let's now take a look at the implementation of our C++ extension,
-which goes into ``lltm.cpp``.
-
-Writing the C++ Op
-^^^^^^^^^^^^^^^^^^
-
-Let's start implementing the LLTM in C++! One function we'll need for the
-backward pass is the derivative of the sigmoid. This is a small enough piece of
-code to discuss the overall environment that is available to us when writing C++
-extensions:
-
-.. code-block:: cpp
-
- #include
-
- #include
-
- torch::Tensor d_sigmoid(torch::Tensor z) {
- auto s = torch::sigmoid(z);
- return (1 - s) * s;
- }
-
-```` is the one-stop header to include all the necessary PyTorch
-bits to write C++ extensions. It includes:
-
-- The ATen library, which is our primary API for tensor computation,
-- `pybind11 `_, which is how we create Python bindings for our C++ code,
-- Headers that manage the details of interaction between ATen and pybind11.
-
-The implementation of :func:`d_sigmoid` shows how to use the ATen API.
-PyTorch's tensor and variable interface is generated automatically from the
-ATen library, so we can more or less translate our Python implementation 1:1
-into C++. Our primary datatype for all computations will be
-:class:`torch::Tensor`. Its full API can be inspected `here
-`_. Notice
-also that we can include ```` or *any other C or C++ header* -- we have
-the full power of C++11 at our disposal.
-
-Forward Pass
-************
-
-Next we can port our entire forward pass to C++:
-
-.. code-block:: cpp
-
- #include
-
- std::vector lltm_forward(
- torch::Tensor input,
- torch::Tensor weights,
- torch::Tensor bias,
- torch::Tensor old_h,
- torch::Tensor old_cell) {
- auto X = torch::cat({old_h, input}, /*dim=*/1);
-
- auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
- auto gates = gate_weights.chunk(3, /*dim=*/1);
-
- auto input_gate = torch::sigmoid(gates[0]);
- auto output_gate = torch::sigmoid(gates[1]);
- auto candidate_cell = torch::elu(gates[2], /*alpha=*/1.0);
-
- auto new_cell = old_cell + candidate_cell * input_gate;
- auto new_h = torch::tanh(new_cell) * output_gate;
-
- return {new_h,
- new_cell,
- input_gate,
- output_gate,
- candidate_cell,
- X,
- gate_weights};
- }
-
-Backward Pass
-*************
-
-The C++ extension API currently does not provide a way of automatically
-generating a backwards function for us. As such, we have to also implement the
-backward pass of our LLTM, which computes the derivative of the loss with
-respect to each input of the forward pass. Ultimately, we will plop both the
-forward and backward function into a :class:`torch.autograd.Function` to create
-a nice Python binding. The backward function is slightly more involved, so
-we'll not dig deeper into the code (if you are interested, `Alex Graves' thesis
-`_ is a good read for more
-information on this):
-
-.. code-block:: cpp
-
- // tanh'(z) = 1 - tanh^2(z)
- torch::Tensor d_tanh(torch::Tensor z) {
- return 1 - z.tanh().pow(2);
- }
-
- // elu'(z) = relu'(z) + { alpha * exp(z) if (alpha * (exp(z) - 1)) < 0, else 0}
- torch::Tensor d_elu(torch::Tensor z, torch::Scalar alpha = 1.0) {
- auto e = z.exp();
- auto mask = (alpha * (e - 1)) < 0;
- return (z > 0).type_as(z) + mask.type_as(z) * (alpha * e);
- }
-
- std::vector lltm_backward(
- torch::Tensor grad_h,
- torch::Tensor grad_cell,
- torch::Tensor new_cell,
- torch::Tensor input_gate,
- torch::Tensor output_gate,
- torch::Tensor candidate_cell,
- torch::Tensor X,
- torch::Tensor gate_weights,
- torch::Tensor weights) {
- auto d_output_gate = torch::tanh(new_cell) * grad_h;
- auto d_tanh_new_cell = output_gate * grad_h;
- auto d_new_cell = d_tanh(new_cell) * d_tanh_new_cell + grad_cell;
-
- auto d_old_cell = d_new_cell;
- auto d_candidate_cell = input_gate * d_new_cell;
- auto d_input_gate = candidate_cell * d_new_cell;
-
- auto gates = gate_weights.chunk(3, /*dim=*/1);
- d_input_gate *= d_sigmoid(gates[0]);
- d_output_gate *= d_sigmoid(gates[1]);
- d_candidate_cell *= d_elu(gates[2]);
-
- auto d_gates =
- torch::cat({d_input_gate, d_output_gate, d_candidate_cell}, /*dim=*/1);
-
- auto d_weights = d_gates.t().mm(X);
- auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true);
-
- auto d_X = d_gates.mm(weights);
- const auto state_size = grad_h.size(1);
- auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
- auto d_input = d_X.slice(/*dim=*/1, state_size);
-
- return {d_old_h, d_input, d_weights, d_bias, d_old_cell};
- }
-
-Binding to Python
-^^^^^^^^^^^^^^^^^
-
-Once you have your operation written in C++ and ATen, you can use pybind11 to
-bind your C++ functions or classes into Python in a very simple manner.
-Questions or issues you have about this part of PyTorch C++ extensions will
-largely be addressed by `pybind11 documentation
- `_.
-
-For our extensions, the necessary binding code spans only four lines:
-
-.. code-block:: cpp
-
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("forward", &lltm_forward, "LLTM forward");
- m.def("backward", &lltm_backward, "LLTM backward");
- }
-
-One bit to note here is the macro ``TORCH_EXTENSION_NAME``. The torch extension
-build will define it as the name you give your extension in the ``setup.py``
-script. In this case, the value of ``TORCH_EXTENSION_NAME`` would be "lltm".
-This is to avoid having to maintain the name of the extension in two places
-(the build script and your C++ code), as a mismatch between the two can lead to
-nasty and hard to track issues.
-
-Using Your Extension
-^^^^^^^^^^^^^^^^^^^^
-
-We are now set to import our extension in PyTorch. At this point, your directory
-structure could look something like this::
-
- pytorch/
- lltm-extension/
- lltm.cpp
- setup.py
-
-Now, run ``python setup.py install`` to build and install your extension. This
-should look something like this::
-
- running install
- running bdist_egg
- running egg_info
- creating lltm_cpp.egg-info
- writing lltm_cpp.egg-info/PKG-INFO
- writing dependency_links to lltm_cpp.egg-info/dependency_links.txt
- writing top-level names to lltm_cpp.egg-info/top_level.txt
- writing manifest file 'lltm_cpp.egg-info/SOURCES.txt'
- reading manifest file 'lltm_cpp.egg-info/SOURCES.txt'
- writing manifest file 'lltm_cpp.egg-info/SOURCES.txt'
- installing library code to build/bdist.linux-x86_64/egg
- running install_lib
- running build_ext
- building 'lltm_cpp' extension
- creating build
- creating build/temp.linux-x86_64-3.7
- gcc -pthread -B ~/local/miniconda/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I~/local/miniconda/lib/python3.7/site-packages/torch/include -I~/local/miniconda/lib/python3.7/site-packages/torch/include/torch/csrc/api/include -I~/local/miniconda/lib/python3.7/site-packages/torch/include/TH -I~/local/miniconda/lib/python3.7/site-packages/torch/include/THC -I~/local/miniconda/include/python3.7m -c lltm.cpp -o build/temp.linux-x86_64-3.7/lltm.o -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=lltm_cpp -D_GLIBCXX_USE_CXX11_ABI=1 -std=c++11
- cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
- creating build/lib.linux-x86_64-3.7
- g++ -pthread -shared -B ~/local/miniconda/compiler_compat -L~/local/miniconda/lib -Wl,-rpath=~/local/miniconda/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/lltm.o -o build/lib.linux-x86_64-3.7/lltm_cpp.cpython-37m-x86_64-linux-gnu.so
- creating build/bdist.linux-x86_64
- creating build/bdist.linux-x86_64/egg
- copying build/lib.linux-x86_64-3.7/lltm_cpp.cpython-37m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg
- creating stub loader for lltm_cpp.cpython-37m-x86_64-linux-gnu.so
- byte-compiling build/bdist.linux-x86_64/egg/lltm_cpp.py to lltm_cpp.cpython-37.pyc
- creating build/bdist.linux-x86_64/egg/EGG-INFO
- copying lltm_cpp.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO
- copying lltm_cpp.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
- copying lltm_cpp.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
- copying lltm_cpp.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
- writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt
- zip_safe flag not set; analyzing archive contents...
- __pycache__.lltm_cpp.cpython-37: module references __file__
- creating 'dist/lltm_cpp-0.0.0-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it
- removing 'build/bdist.linux-x86_64/egg' (and everything under it)
- Processing lltm_cpp-0.0.0-py3.7-linux-x86_64.egg
- removing '~/local/miniconda/lib/python3.7/site-packages/lltm_cpp-0.0.0-py3.7-linux-x86_64.egg' (and everything under it)
- creating ~/local/miniconda/lib/python3.7/site-packages/lltm_cpp-0.0.0-py3.7-linux-x86_64.egg
- Extracting lltm_cpp-0.0.0-py3.7-linux-x86_64.egg to ~/local/miniconda/lib/python3.7/site-packages
- lltm-cpp 0.0.0 is already the active version in easy-install.pth
-
- Installed ~/local/miniconda/lib/python3.7/site-packages/lltm_cpp-0.0.0-py3.7-linux-x86_64.egg
- Processing dependencies for lltm-cpp==0.0.0
- Finished processing dependencies for lltm-cpp==0.0.0
-
-
-A small note on compilers: Due to ABI versioning issues, the compiler you use to
-build your C++ extension must be *ABI-compatible* with the compiler PyTorch was
-built with. In practice, this means that you must use GCC version 4.9 and above on Linux.
-For Ubuntu 16.04 and other more-recent Linux distributions, this should be the
-default compiler already. On MacOS, you must use clang (which does not have any ABI versioning issues). In the worst
-case, you can build PyTorch from source with your compiler and then build the
-extension with that same compiler.
-
-Once your extension is built, you can simply import it in Python, using the
-name you specified in your ``setup.py`` script. Just be sure to ``import
-torch`` first, as this will resolve some symbols that the dynamic linker must
-see::
-
- In [1]: import torch
- In [2]: import lltm_cpp
- In [3]: lltm_cpp.forward
- Out[3]: <function PyCapsule.forward>
-
-If we call ``help()`` on the function or module, we can see that its signature
-matches our C++ code::
-
- In[4] help(lltm_cpp.forward)
- forward(...) method of builtins.PyCapsule instance
- forward(arg0: torch::Tensor, arg1: torch::Tensor, arg2: torch::Tensor, arg3: torch::Tensor, arg4: torch::Tensor) -> List[torch::Tensor]
-
- LLTM forward
-
-Since we are now able to call our C++ functions from Python, we can wrap them
-with :class:`torch.autograd.Function` and :class:`torch.nn.Module` to make them first
-class citizens of PyTorch::
-
- import math
- import torch
-
- # Our module!
- import lltm_cpp
-
- class LLTMFunction(torch.autograd.Function):
- @staticmethod
- def forward(ctx, input, weights, bias, old_h, old_cell):
- outputs = lltm_cpp.forward(input, weights, bias, old_h, old_cell)
- new_h, new_cell = outputs[:2]
- variables = outputs[1:] + [weights]
- ctx.save_for_backward(*variables)
-
- return new_h, new_cell
-
- @staticmethod
- def backward(ctx, grad_h, grad_cell):
- outputs = lltm_cpp.backward(
- grad_h.contiguous(), grad_cell.contiguous(), *ctx.saved_variables)
- d_old_h, d_input, d_weights, d_bias, d_old_cell = outputs
- return d_input, d_weights, d_bias, d_old_h, d_old_cell
-
-
- class LLTM(torch.nn.Module):
- def __init__(self, input_features, state_size):
- super(LLTM, self).__init__()
- self.input_features = input_features
- self.state_size = state_size
- self.weights = torch.nn.Parameter(
- torch.empty(3 * state_size, input_features + state_size))
- self.bias = torch.nn.Parameter(torch.empty(3 * state_size))
- self.reset_parameters()
-
- def reset_parameters(self):
- stdv = 1.0 / math.sqrt(self.state_size)
- for weight in self.parameters():
- weight.data.uniform_(-stdv, +stdv)
-
- def forward(self, input, state):
- return LLTMFunction.apply(input, self.weights, self.bias, *state)
-
-Performance Comparison
-**********************
-
-Now that we are able to use and call our C++ code from PyTorch, we can run a
-small benchmark to see how much performance we gained from rewriting our op in
-C++. We'll run the LLTM forwards and backwards a few times and measure the
-duration::
-
- import torch
-
- batch_size = 16
- input_features = 32
- state_size = 128
-
- X = torch.randn(batch_size, input_features)
- h = torch.randn(batch_size, state_size)
- C = torch.randn(batch_size, state_size)
-
- rnn = LLTM(input_features, state_size)
-
- forward = 0
- backward = 0
- for _ in range(100000):
- start = time.time()
- new_h, new_C = rnn(X, (h, C))
- forward += time.time() - start
-
- start = time.time()
- (new_h.sum() + new_C.sum()).backward()
- backward += time.time() - start
-
- print('Forward: {:.3f} us | Backward {:.3f} us'.format(forward * 1e6/1e5, backward * 1e6/1e5))
-
-If we run this code with the original LLTM we wrote in pure Python at the start
-of this post, we get the following numbers (on my machine)::
-
- Forward: 506.480 us | Backward 444.694 us
-
-and with our new C++ version::
-
- Forward: 349.335 us | Backward 443.523 us
-
-We can already see a significant speedup for the forward function (more than
-30%). For the backward function a speedup is visible, albeit not a major one. The
-backward pass I wrote above was not particularly optimized and could definitely
-be improved. Also, PyTorch's automatic differentiation engine can automatically
-parallelize computation graphs, may use a more efficient flow of operations
-overall, and is also implemented in C++, so it's expected to be fast.
-Nevertheless, this is a good start.
-
-Performance on GPU Devices
-**************************
-
-A wonderful fact about PyTorch's *ATen* backend is that it abstracts the
-computing device you are running on. This means the same code we wrote for CPU
-can *also* run on GPU, and individual operations will correspondingly dispatch
-to GPU-optimized implementations. For certain operations like matrix multiply
-(like ``mm`` or ``addmm``), this is a big win. Let's take a look at how much
-performance we gain from running our C++ code with CUDA tensors. No changes to
-our implementation are required, we simply need to put our tensors in GPU
-memory from Python, with either adding ``device=cuda_device`` argument at
-creation time or using ``.to(cuda_device)`` after creation::
-
- import torch
-
- assert torch.cuda.is_available()
- cuda_device = torch.device("cuda") # device object representing GPU
-
- batch_size = 16
- input_features = 32
- state_size = 128
-
- # Note the device=cuda_device arguments here
- X = torch.randn(batch_size, input_features, device=cuda_device)
- h = torch.randn(batch_size, state_size, device=cuda_device)
- C = torch.randn(batch_size, state_size, device=cuda_device)
-
- rnn = LLTM(input_features, state_size).to(cuda_device)
-
- forward = 0
- backward = 0
- for _ in range(100000):
- start = time.time()
- new_h, new_C = rnn(X, (h, C))
- torch.cuda.synchronize()
- forward += time.time() - start
-
- start = time.time()
- (new_h.sum() + new_C.sum()).backward()
- torch.cuda.synchronize()
- backward += time.time() - start
-
- print('Forward: {:.3f} us | Backward {:.3f} us'.format(forward * 1e6/1e5, backward * 1e6/1e5))
-
-Once more comparing our plain PyTorch code with our C++ version, now both
-running on CUDA devices, we again see performance gains. For Python/PyTorch::
-
- Forward: 187.719 us | Backward 410.815 us
-
-And C++/ATen::
-
- Forward: 149.802 us | Backward 393.458 us
-
-That's a great overall speedup compared to non-CUDA code. However, we can pull
-even more performance out of our C++ code by writing custom CUDA kernels, which
-we'll dive into soon. Before that, let's discuss another way of building your C++
-extensions.
-
-JIT Compiling Extensions
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-Previously, I mentioned there were two ways of building C++ extensions: using
-:mod:`setuptools` or just in time (JIT). Having covered the former, let's
-elaborate on the latter. The JIT compilation mechanism provides you with a way
-of compiling and loading your extensions on the fly by calling a simple
-function in PyTorch's API called :func:`torch.utils.cpp_extension.load`. For
-the LLTM, this would look as simple as this::
-
- from torch.utils.cpp_extension import load
-
- lltm_cpp = load(name="lltm_cpp", sources=["lltm.cpp"])
-
-Here, we provide the function with the same information as for
-:mod:`setuptools`. In the background, this will do the following:
-
-1. Create a temporary directory ``/tmp/torch_extensions/lltm_cpp``,
-2. Emit a `Ninja <https://ninja-build.org>`_ build file into that temporary directory,
-3. Compile your source files into a shared library,
-4. Import this shared library as a Python module.
-
-In fact, if you pass ``verbose=True`` to :func:`cpp_extension.load`, you will
-be informed about the process::
-
- Using /tmp/torch_extensions as PyTorch extensions root...
- Emitting ninja build file /tmp/torch_extensions/lltm_cpp/build.ninja...
- Building extension module lltm_cpp...
- Loading extension module lltm_cpp...
-
-The resulting Python module will be exactly the same as produced by setuptools,
-but removes the requirement of having to maintain a separate ``setup.py`` build
-file. If your setup is more complicated and you do need the full power of
-:mod:`setuptools`, you *can* write your own ``setup.py`` -- but in many cases
-this JIT technique will do just fine. The first time you run through this line,
-it will take some time, as the extension is compiling in the background. Since
-we use the Ninja build system to build your sources, re-compilation is
-incremental and thus re-loading the extension when you run your Python module a
-second time is fast and has low overhead if you didn't change the extension's
-source files.
-
-Writing a Mixed C++/CUDA extension
-----------------------------------
-
-To really take our implementation to the next level, we can hand-write parts of
-our forward and backward passes with custom CUDA kernels. For the LLTM, this has
-the prospect of being particularly effective, as there are a large number of
-pointwise operations in sequence, that can all be fused and parallelized in a
-single CUDA kernel. Let's see how we could write such a CUDA kernel and
-integrate it with PyTorch using this extension mechanism.
-
-The general strategy for writing a CUDA extension is to first write a C++ file
-which defines the functions that will be called from Python, and binds those
-functions to Python with pybind11. Furthermore, this file will also *declare*
-functions that are defined in CUDA (``.cu``) files. The C++ functions will then
-do some checks and ultimately forward its calls to the CUDA functions. In the
-CUDA files, we write our actual CUDA kernels. The :mod:`cpp_extension` package
-will then take care of compiling the C++ sources with a C++ compiler like
-``gcc`` and the CUDA sources with NVIDIA's ``nvcc`` compiler. This ensures that
-each compiler takes care of files it knows best to compile. Ultimately, they
-will be linked into one shared library that is available to us from Python
-code.
-
-We'll start with the C++ file, which we'll call ``lltm_cuda.cpp``, for example:
-
-.. code-block:: cpp
-
- #include <torch/extension.h>
-
- #include <vector>
-
- // CUDA forward declarations
-
- std::vector<torch::Tensor> lltm_cuda_forward(
- torch::Tensor input,
- torch::Tensor weights,
- torch::Tensor bias,
- torch::Tensor old_h,
- torch::Tensor old_cell);
-
- std::vector<torch::Tensor> lltm_cuda_backward(
- torch::Tensor grad_h,
- torch::Tensor grad_cell,
- torch::Tensor new_cell,
- torch::Tensor input_gate,
- torch::Tensor output_gate,
- torch::Tensor candidate_cell,
- torch::Tensor X,
- torch::Tensor gate_weights,
- torch::Tensor weights);
-
- // C++ interface
-
- #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
- #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
- #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
- std::vector<torch::Tensor> lltm_forward(
- torch::Tensor input,
- torch::Tensor weights,
- torch::Tensor bias,
- torch::Tensor old_h,
- torch::Tensor old_cell) {
- CHECK_INPUT(input);
- CHECK_INPUT(weights);
- CHECK_INPUT(bias);
- CHECK_INPUT(old_h);
- CHECK_INPUT(old_cell);
-
- return lltm_cuda_forward(input, weights, bias, old_h, old_cell);
- }
-
- std::vector<torch::Tensor> lltm_backward(
- torch::Tensor grad_h,
- torch::Tensor grad_cell,
- torch::Tensor new_cell,
- torch::Tensor input_gate,
- torch::Tensor output_gate,
- torch::Tensor candidate_cell,
- torch::Tensor X,
- torch::Tensor gate_weights,
- torch::Tensor weights) {
- CHECK_INPUT(grad_h);
- CHECK_INPUT(grad_cell);
- CHECK_INPUT(input_gate);
- CHECK_INPUT(output_gate);
- CHECK_INPUT(candidate_cell);
- CHECK_INPUT(X);
- CHECK_INPUT(gate_weights);
- CHECK_INPUT(weights);
-
- return lltm_cuda_backward(
- grad_h,
- grad_cell,
- new_cell,
- input_gate,
- output_gate,
- candidate_cell,
- X,
- gate_weights,
- weights);
- }
-
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("forward", &lltm_forward, "LLTM forward (CUDA)");
- m.def("backward", &lltm_backward, "LLTM backward (CUDA)");
- }
-
-As you can see, it is largely boilerplate, checks and forwarding to functions
-that we'll define in the CUDA file. We'll name this file
-``lltm_cuda_kernel.cu`` (note the ``.cu`` extension!). NVCC can reasonably
-compile C++11, thus we still have ATen and the C++ standard library available
-to us (but not ``torch.h``). Note that :mod:`setuptools` cannot handle files
-with the same name but different extensions, so if you use the ``setup.py``
-method instead of the JIT method, you must give your CUDA file a different name
-than your C++ file (for the JIT method, ``lltm.cpp`` and ``lltm.cu`` would work
-fine). Let's take a small peek at what this file will look like:
-
-.. code-block:: cpp
-
- #include <torch/extension.h>
-
- #include <cuda.h>
- #include <cuda_runtime.h>
-
- #include <vector>
-
- template <typename scalar_t>
- __device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
- return 1.0 / (1.0 + exp(-z));
- }
-
-Here we see the headers I just described, as well as the fact that we are using
-CUDA-specific declarations like ``__device__`` and ``__forceinline__`` and
-functions like ``exp``. Let's continue with a few more helper functions that
-we'll need:
-
-.. code-block:: cpp
-
- template <typename scalar_t>
- __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
- const auto s = sigmoid(z);
- return (1.0 - s) * s;
- }
-
- template <typename scalar_t>
- __device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
- const auto t = tanh(z);
- return 1 - (t * t);
- }
-
- template <typename scalar_t>
- __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
- return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0));
- }
-
- template <typename scalar_t>
- __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
- const auto e = exp(z);
- const auto d_relu = z < 0.0 ? 0.0 : 1.0;
- return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
- }
-
-To now actually implement a function, we'll again need two things: one function
-that performs operations we don't wish to explicitly write by hand and calls
-into CUDA kernels, and then the actual CUDA kernel for the parts we want to
-speed up. For the forward pass, the first function should look like this:
-
-.. code-block:: cpp
-
- std::vector<torch::Tensor> lltm_cuda_forward(
- torch::Tensor input,
- torch::Tensor weights,
- torch::Tensor bias,
- torch::Tensor old_h,
- torch::Tensor old_cell) {
- auto X = torch::cat({old_h, input}, /*dim=*/1);
- auto gates = torch::addmm(bias, X, weights.transpose(0, 1));
-
- const auto batch_size = old_cell.size(0);
- const auto state_size = old_cell.size(1);
-
- auto new_h = torch::zeros_like(old_cell);
- auto new_cell = torch::zeros_like(old_cell);
- auto input_gate = torch::zeros_like(old_cell);
- auto output_gate = torch::zeros_like(old_cell);
- auto candidate_cell = torch::zeros_like(old_cell);
-
- const int threads = 1024;
- const dim3 blocks((state_size + threads - 1) / threads, batch_size);
-
- AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
- lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
- gates.data<scalar_t>(),
- old_cell.data<scalar_t>(),
- new_h.data<scalar_t>(),
- new_cell.data<scalar_t>(),
- input_gate.data<scalar_t>(),
- output_gate.data<scalar_t>(),
- candidate_cell.data<scalar_t>(),
- state_size);
- }));
-
- return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
- }
-
-The main point of interest here is the ``AT_DISPATCH_FLOATING_TYPES`` macro and
-the kernel launch (indicated by the ``<<<...>>>``). While ATen abstracts away
-the device and datatype of the tensors we deal with, a tensor will, at runtime,
-still be backed by memory of a concrete type on a concrete device. As such, we
-need a way of determining at runtime what type a tensor is and then selectively
-call functions with the corresponding correct type signature. Done manually,
-this would (conceptually) look something like this:
-
-.. code-block:: cpp
-
- switch (tensor.type().scalarType()) {
- case torch::ScalarType::Double:
- return function(tensor.data<double>());
- case torch::ScalarType::Float:
- return function(tensor.data<float>());
- ...
- }
-
-The purpose of ``AT_DISPATCH_FLOATING_TYPES`` is to take care of this dispatch
-for us. It takes a type (``gates.type()`` in our case), a name (for error
-messages) and a lambda function. Inside this lambda function, the type alias
-``scalar_t`` is available and is defined as the type that the tensor actually
-is at runtime in that context. As such, if we have a template function (which
-our CUDA kernel will be), we can instantiate it with this ``scalar_t`` alias,
-and the correct function will be called. In this case, we also want to retrieve
-the data pointers of the tensors as pointers of that ``scalar_t`` type. If you
-wanted to dispatch over all types and not just floating point types (``Float``
-and ``Double``), you can use ``AT_DISPATCH_ALL_TYPES``.
-
-Note that we perform some operations with plain ATen. These operations will
-still run on the GPU, but using ATen's default implementations. This makes
-sense, because ATen will use highly optimized routines for things like matrix
-multiplies (e.g. ``addmm``) or convolutions which would be much harder to
-implement and improve ourselves.
-
-As for the kernel launch itself, we are here specifying that each CUDA block
-will have 1024 threads, and that the entire GPU grid is split into as many
-blocks of ``1 x 1024`` threads as are required to fill our matrices with one
-thread per component. For example, if our state size was 2048 and our batch
-size 4, we'd launch a total of ``4 x 2 = 8`` blocks with each 1024 threads. If
-you've never heard of CUDA "blocks" or "grids" before, an `introductory read
-about CUDA <https://devblogs.nvidia.com/even-easier-introduction-cuda/>`_ may
-help.
-
-The actual CUDA kernel is fairly simple (if you've ever programmed GPUs before):
-
-.. code-block:: cpp
-
- template <typename scalar_t>
- __global__ void lltm_cuda_forward_kernel(
- const scalar_t* __restrict__ gates,
- const scalar_t* __restrict__ old_cell,
- scalar_t* __restrict__ new_h,
- scalar_t* __restrict__ new_cell,
- scalar_t* __restrict__ input_gate,
- scalar_t* __restrict__ output_gate,
- scalar_t* __restrict__ candidate_cell,
- size_t state_size) {
- const int column = blockIdx.x * blockDim.x + threadIdx.x;
- const int index = blockIdx.y * state_size + column;
- const int gates_row = blockIdx.y * (state_size * 3);
- if (column < state_size) {
- input_gate[index] = sigmoid(gates[gates_row + column]);
- output_gate[index] = sigmoid(gates[gates_row + state_size + column]);
- candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]);
- new_cell[index] =
- old_cell[index] + candidate_cell[index] * input_gate[index];
- new_h[index] = tanh(new_cell[index]) * output_gate[index];
- }
- }
-
-What's primarily interesting here is that we are able to compute all of these
-pointwise operations entirely in parallel for each individual component in our
-gate matrices. If you imagine having to do this with a giant ``for`` loop over
-a million elements in serial, you can see why this would be much faster.
-
-Using accessors
-^^^^^^^^^^^^^^^
-
-You can see in the CUDA kernel that we work directly on pointers with the right
-type. Indeed, working directly with high level type agnostic tensors inside cuda
-kernels would be very inefficient.
-
-However, this comes at a cost of ease of use and readability, especially for
-highly dimensional data. In our example, we know for example that the contiguous
-``gates`` tensor has 3 dimensions:
-
-1. batch, size of ``batch_size`` and stride of ``3*state_size``
-2. row, size of ``3`` and stride of ``state_size``
-3. index, size of ``state_size`` and stride of ``1``
-
-How can we access the element ``gates[n][row][column]`` inside the kernel then?
-It turns out that you need the strides to access your element with some simple
-arithmetic.
-
-.. code-block:: cpp
-
- gates.data<scalar_t>()[n*3*state_size + row*state_size + column]
-
-
-In addition to being verbose, this expression needs stride to be explicitly
-known, and thus passed to the kernel function within its arguments. You can see
-that in the case of kernel functions accepting multiple tensors with different
-sizes you will end up with a very long list of arguments.
-
-Fortunately for us, ATen provides accessors that are created with a single
-dynamic check that a Tensor is the type and number of dimensions.
-Accessors then expose an API for accessing the Tensor elements efficiently
-without having to convert to a single pointer:
-
-.. code-block:: cpp
-
- torch::Tensor foo = torch::rand({12, 12});
-
- // assert foo is 2-dimensional and holds floats.
- auto foo_a = foo.accessor<float,2>();
- float trace = 0;
-
- for(int i = 0; i < foo_a.size(0); i++) {
- // use the accessor foo_a to get tensor data.
- trace += foo_a[i][i];
- }
-
-Accessor objects have a relatively high level interface, with ``.size()`` and
-``.stride()`` methods and multi-dimensional indexing. The ``.accessor<>``
-interface is designed to access data efficiently on cpu tensor. The equivalent
-for cuda tensors is the ``packed_accessor<>``, which produces a Packed Accessor.
-
-The fundamental difference with Accessor is that a Packed Accessor copies size
-and stride data inside of its structure instead of pointing to it. It allows us
-to pass it to a CUDA kernel function and use its interface inside it.
-
-We can design a function that takes Packed Accessors instead of pointers.
-
-.. code-block:: cpp
-
- __global__ void lltm_cuda_forward_kernel(
- const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell)
-
-Let's decompose the template used here. The first two arguments ``scalar_t`` and
-``2`` are the same as regular Accessor. The argument
-``torch::RestrictPtrTraits`` indicates that the ``__restrict__`` keyword must be
-used. Finally, the argument ``size_t`` indicates that sizes and strides must be
-stored in a ``size_t`` integer. This is important as by default ``int64_t`` is
-used and can make the kernel slower.
-
-The function declaration becomes
-
-.. code-block:: cpp
-
- template <typename scalar_t>
- __global__ void lltm_cuda_forward_kernel(
- const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
- //batch index
- const int n = blockIdx.y;
- // column index
- const int c = blockIdx.x * blockDim.x + threadIdx.x;
- if (c < gates.size(2)){
- input_gate[n][c] = sigmoid(gates[n][0][c]);
- output_gate[n][c] = sigmoid(gates[n][1][c]);
- candidate_cell[n][c] = elu(gates[n][2][c]);
- new_cell[n][c] =
- old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
- new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
- }
- }
-
-The implementation is much more readable! This function is then called by
-creating Packed Accessors with the ``.packed_accessor<>`` method within the
-host function.
-
-.. code-block:: cpp
-
- std::vector<torch::Tensor> lltm_cuda_forward(
- torch::Tensor input,
- torch::Tensor weights,
- torch::Tensor bias,
- torch::Tensor old_h,
- torch::Tensor old_cell) {
- auto X = torch::cat({old_h, input}, /*dim=*/1);
- auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
-
- const auto batch_size = old_cell.size(0);
- const auto state_size = old_cell.size(1);
-
- auto gates = gate_weights.reshape({batch_size, 3, state_size});
- auto new_h = torch::zeros_like(old_cell);
- auto new_cell = torch::zeros_like(old_cell);
- auto input_gate = torch::zeros_like(old_cell);
- auto output_gate = torch::zeros_like(old_cell);
- auto candidate_cell = torch::zeros_like(old_cell);
-
- const int threads = 1024;
- const dim3 blocks((state_size + threads - 1) / threads, batch_size);
-
- AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
- lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
- gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
- old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
- }));
-
- return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
- }
-
-The backwards pass follows much the same pattern and I won't elaborate further
-on it:
-
-.. code-block:: cpp
-
- template <typename scalar_t>
- __global__ void lltm_cuda_backward_kernel(
- torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
- torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
- const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
- const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
- //batch index
- const int n = blockIdx.y;
- // column index
- const int c = blockIdx.x * blockDim.x + threadIdx.x;
- if (c < d_gates.size(2)){
- const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
- const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
- const auto d_new_cell =
- d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
-
-
- d_old_cell[n][c] = d_new_cell;
- const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
- const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
-
- d_gates[n][0][c] =
- d_input_gate * d_sigmoid(gate_weights[n][0][c]);
- d_gates[n][1][c] =
- d_output_gate * d_sigmoid(gate_weights[n][1][c]);
- d_gates[n][2][c] =
- d_candidate_cell * d_elu(gate_weights[n][2][c]);
- }
- }
-
- std::vector<torch::Tensor> lltm_cuda_backward(
- torch::Tensor grad_h,
- torch::Tensor grad_cell,
- torch::Tensor new_cell,
- torch::Tensor input_gate,
- torch::Tensor output_gate,
- torch::Tensor candidate_cell,
- torch::Tensor X,
- torch::Tensor gates,
- torch::Tensor weights) {
- auto d_old_cell = torch::zeros_like(new_cell);
- auto d_gates = torch::zeros_like(gates);
-
- const auto batch_size = new_cell.size(0);
- const auto state_size = new_cell.size(1);
-
- const int threads = 1024;
- const dim3 blocks((state_size + threads - 1) / threads, batch_size);
-
- AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
- lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
- d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
- grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
- gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
- }));
-
- auto d_gate_weights = d_gates.reshape({batch_size, 3*state_size});
- auto d_weights = d_gate_weights.t().mm(X);
- auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
-
- auto d_X = d_gate_weights.mm(weights);
- auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
- auto d_input = d_X.slice(/*dim=*/1, state_size);
-
- return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
- }
-
-
-Integrating a C++/CUDA Operation with PyTorch
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Integration of our CUDA-enabled op with PyTorch is again very straightforward.
-If you want to write a ``setup.py`` script, it could look like this::
-
- from setuptools import setup
- from torch.utils.cpp_extension import BuildExtension, CUDAExtension
-
- setup(
- name='lltm',
- ext_modules=[
- CUDAExtension('lltm_cuda', [
- 'lltm_cuda.cpp',
- 'lltm_cuda_kernel.cu',
- ])
- ],
- cmdclass={
- 'build_ext': BuildExtension
- })
-
-Instead of :func:`CppExtension`, we now use :func:`CUDAExtension`. We can just
-specify the ``.cu`` file along with the ``.cpp`` files -- the library takes
-care of all the hassle this entails for you. The JIT mechanism is even
-simpler::
-
- from torch.utils.cpp_extension import load
-
- lltm = load(name='lltm', sources=['lltm_cuda.cpp', 'lltm_cuda_kernel.cu'])
-
-Performance Comparison
-**********************
-
-Our hope was that parallelizing and fusing the pointwise operations of our code
-with CUDA would improve the performance of our LLTM. Let's see if that holds
-true. We can run the code I listed earlier to run a benchmark. Our fastest
-version earlier was the CUDA-based C++ code::
-
- Forward: 149.802 us | Backward 393.458 us
-
-
-And now with our custom CUDA kernel::
-
- Forward: 129.431 us | Backward 304.641 us
-
-More performance increases!
-
-Conclusion
-----------
-
-You should now be equipped with a good overview of PyTorch's C++ extension
-mechanism as well as a motivation for using them. You can find the code
-examples displayed in this note `here
-<https://github.com/pytorch/extension-cpp>`_. If you have questions, please use
-`the forums <https://discuss.pytorch.org>`_. Also be sure to check our `FAQ
-<https://pytorch.org/cppdocs/notes/faq.html>`_ in case you run into any issues.
diff --git a/advanced_source/cpp_frontend.rst b/advanced_source/cpp_frontend.rst
index bda9f3f256e..968afa01b23 100644
--- a/advanced_source/cpp_frontend.rst
+++ b/advanced_source/cpp_frontend.rst
@@ -1,11 +1,31 @@
+.. _cpp-frontend-tutorial:
+
Using the PyTorch C++ Frontend
==============================
+**Author:** `Peter Goldsborough `_
+
+.. grid:: 2
+
+ .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn
+ :class-card: card-prerequisites
+
+ * How to build a C++ application that utilizes the PyTorch C++ frontend
+ * How to define and train neural networks from C++ using PyTorch abstractions
+
+ .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites
+ :class-card: card-prerequisites
+
+ * PyTorch 1.5 or later
+ * Basic understanding of C++ programming
+ * Basic Ubuntu Linux environment with CMake >= 3.5; similar commands will work in a MacOS / Windows environment
+ * (Optional) A CUDA-based GPU for the GPU training sections
+
The PyTorch C++ frontend is a pure C++ interface to the PyTorch machine learning
framework. While the primary interface to PyTorch naturally is Python, this
Python API sits atop a substantial C++ codebase providing foundational data
structures and functionality such as tensors and automatic differentiation. The
-C++ frontend exposes a pure C++11 API that extends this underlying C++ codebase
+C++ frontend exposes a pure C++17 API that extends this underlying C++ codebase
with tools required for machine learning training and inference. This includes a
built-in collection of common components for neural network modeling; an API to
extend this collection with custom modules; a library of popular optimization
@@ -57,7 +77,7 @@ the right tool for the job. Examples for such environments include:
Multiprocessing is an alternative, but not as scalable and has significant
shortcomings. C++ has no such constraints and threads are easy to use and
create. Models requiring heavy parallelization, like those used in `Deep
- Neuroevolution `_, can benefit from
+ Neuroevolution `_, can benefit from
this.
- **Existing C++ Codebases**: You may be the owner of an existing C++
application doing anything from serving web pages in a backend server to
@@ -105,6 +125,8 @@ environment, however you are free to follow along on MacOS or Windows too.
.. tip::
On Windows, debug and release builds are not ABI-compatible. If you plan to
build your project in debug mode, please try the debug version of LibTorch.
+ Also, make sure you specify the correct configuration in the ``cmake --build .``
+ line below.
The first step is to download the LibTorch distribution locally, via the link
retrieved from the PyTorch website. For a vanilla Ubuntu Linux environment, this
@@ -135,14 +157,14 @@ on we'll use this ``CMakeLists.txt`` file:
.. code-block:: cmake
- cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
+ cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
project(dcgan)
find_package(Torch REQUIRED)
add_executable(dcgan dcgan.cpp)
target_link_libraries(dcgan "${TORCH_LIBRARIES}")
- set_property(TARGET dcgan PROPERTY CXX_STANDARD 11)
+ set_property(TARGET dcgan PROPERTY CXX_STANDARD 17)
.. note::
@@ -201,7 +223,7 @@ corresponding absolute path. Now, we are ready to build our application:
-- Configuring done
-- Generating done
-- Build files have been written to: /home/build
- root@fa350df05ecf:/home/build# make -j
+ root@fa350df05ecf:/home/build# cmake --build . --config Release
Scanning dependencies of target dcgan
[ 50%] Building CXX object CMakeFiles/dcgan.dir/dcgan.cpp.o
[100%] Linking CXX executable dcgan
@@ -209,9 +231,9 @@ corresponding absolute path. Now, we are ready to build our application:
Above, we first created a ``build`` folder inside of our ``dcgan`` directory,
entered this folder, ran the ``cmake`` command to generate the necessary build
-(Make) files and finally compiled the project successfully by running ``make
--j``. We are now all set to execute our minimal binary and complete this section
-on basic project configuration:
+(Make) files and finally compiled the project successfully by running ``cmake
+--build . --config Release``. We are now all set to execute our minimal binary
+and complete this section on basic project configuration:
.. code-block:: shell
@@ -660,7 +682,7 @@ Defining the DCGAN Modules
We now have the necessary background and introduction to define the modules for
the machine learning task we want to solve in this post. To recap: our task is
to generate images of digits from the `MNIST dataset
- `_. We want to use a `generative adversarial
+`_. We want to use a `generative adversarial
network (GAN)
`_ to solve
this task. In particular, we'll use a `DCGAN architecture
@@ -696,136 +718,78 @@ The Generator Module
********************
We begin by defining the generator module, which consists of a series of
-transposed 2D convolutions, batch normalizations and ReLU activation units. Like
-in Python, PyTorch here provides two APIs for model definition: a functional one
-where inputs are passed through successive functions, and a more object-oriented
-one where we build a ``Sequential`` module containing the entire model as
-submodules. Let's see how our generator looks with either API, and you can
-decide for yourself which one you prefer. First, using ``Sequential``:
+transposed 2D convolutions, batch normalizations and ReLU activation units.
+We explicitly pass inputs (in a functional way) between modules in the
+``forward()`` method of a module we define ourselves:
.. code-block:: cpp
- using namespace torch;
-
- nn::Sequential generator(
- // Layer 1
- nn::Conv2d(nn::Conv2dOptions(kNoiseSize, 256, 4)
- .with_bias(false)
- .transposed(true)),
- nn::BatchNorm(256),
- nn::Functional(torch::relu),
- // Layer 2
- nn::Conv2d(nn::Conv2dOptions(256, 128, 3)
- .stride(2)
- .padding(1)
- .with_bias(false)
- .transposed(true)),
- nn::BatchNorm(128),
- nn::Functional(torch::relu),
- // Layer 3
- nn::Conv2d(nn::Conv2dOptions(128, 64, 4)
- .stride(2)
- .padding(1)
- .with_bias(false)
- .transposed(true)),
- nn::BatchNorm(64),
- nn::Functional(torch::relu),
- // Layer 4
- nn::Conv2d(nn::Conv2dOptions(64, 1, 4)
- .stride(2)
- .padding(1)
- .with_bias(false)
- .transposed(true)),
- nn::Functional(torch::tanh));
-
-.. tip::
-
- A ``Sequential`` module simply performs function composition. The output of
- the first submodule becomes the input of the second, the output of the third
- becomes the input of the fourth and so on.
-
-The particular modules chosen, like ``nn::Conv2d`` and ``nn::BatchNorm``,
-follows the structure outlined earlier. The ``kNoiseSize`` constant determines
-the size of the input noise vector and is set to ``100``. Notice also that we
-use the ``torch::nn::Functional`` module for our activation functions, passing
-it ``torch::relu`` for inner layers and ``torch::tanh`` as the final activation.
-Hyperparameters were, of course, found via grad student descent.
-
-.. note::
-
- The Python frontend has one module for each activation function, like
- ``torch.nn.ReLU`` or ``torch.nn.Tanh``. In C++, we instead only provide the
- ``Functional`` module, to which you can pass any C++ function that will be
- called inside the ``Functional``'s ``forward()`` method.
-
-.. attention::
-
- No grad students were harmed in the discovery of hyperparameters. They were
- fed Soylent regularly.
-
-For the second approach, we explicitly pass inputs (in a functional way) between
-modules in the ``forward()`` method of a module we define ourselves:
-
-.. code-block:: cpp
-
- struct GeneratorImpl : nn::Module {
- GeneratorImpl()
- : conv1(nn::Conv2dOptions(kNoiseSize, 512, 4)
- .with_bias(false)
- .transposed(true)),
- batch_norm1(512),
- conv2(nn::Conv2dOptions(512, 256, 4)
- .stride(2)
- .padding(1)
- .with_bias(false)
- .transposed(true)),
- batch_norm2(256),
- conv3(nn::Conv2dOptions(256, 128, 4)
+ struct DCGANGeneratorImpl : nn::Module {
+ DCGANGeneratorImpl(int kNoiseSize)
+ : conv1(nn::ConvTranspose2dOptions(kNoiseSize, 256, 4)
+ .bias(false)),
+ batch_norm1(256),
+ conv2(nn::ConvTranspose2dOptions(256, 128, 3)
.stride(2)
.padding(1)
- .with_bias(false)
- .transposed(true)),
- batch_norm3(128),
- conv4(nn::Conv2dOptions(128, 64, 4)
+ .bias(false)),
+ batch_norm2(128),
+ conv3(nn::ConvTranspose2dOptions(128, 64, 4)
.stride(2)
.padding(1)
- .with_bias(false)
- .transposed(true)),
- batch_norm4(64),
- conv5(nn::Conv2dOptions(64, 1, 4)
+ .bias(false)),
+ batch_norm3(64),
+ conv4(nn::ConvTranspose2dOptions(64, 1, 4)
.stride(2)
.padding(1)
- .with_bias(false)
- .transposed(true)) {}
-
- torch::Tensor forward(torch::Tensor x) {
- x = torch::relu(batch_norm1(conv1(x)));
- x = torch::relu(batch_norm2(conv2(x)));
- x = torch::relu(batch_norm3(conv3(x)));
- x = torch::relu(batch_norm4(conv4(x)));
- x = torch::tanh(conv5(x));
- return x;
- }
-
- nn::Conv2d conv1, conv2, conv3, conv4, conv5;
- nn::BatchNorm batch_norm1, batch_norm2, batch_norm3, batch_norm4;
+ .bias(false))
+ {
+ // register_module() is needed if we want to use the parameters() method later on
+ register_module("conv1", conv1);
+ register_module("conv2", conv2);
+ register_module("conv3", conv3);
+ register_module("conv4", conv4);
+ register_module("batch_norm1", batch_norm1);
+ register_module("batch_norm2", batch_norm2);
+ register_module("batch_norm3", batch_norm3);
+ }
+
+ torch::Tensor forward(torch::Tensor x) {
+ x = torch::relu(batch_norm1(conv1(x)));
+ x = torch::relu(batch_norm2(conv2(x)));
+ x = torch::relu(batch_norm3(conv3(x)));
+ x = torch::tanh(conv4(x));
+ return x;
+ }
+
+ nn::ConvTranspose2d conv1, conv2, conv3, conv4;
+ nn::BatchNorm2d batch_norm1, batch_norm2, batch_norm3;
};
- TORCH_MODULE(Generator);
+ TORCH_MODULE(DCGANGenerator);
+
+ DCGANGenerator generator(kNoiseSize);
+
+We can now invoke ``forward()`` on the ``DCGANGenerator`` to map a noise sample to an image.
+
+The particular modules chosen, like ``nn::ConvTranspose2d`` and ``nn::BatchNorm2d``,
+follow the structure outlined earlier. The ``kNoiseSize`` constant determines
+the size of the input noise vector and is set to ``100``. Hyperparameters were,
+of course, found via grad student descent.
- Generator generator;
+.. attention::
-Whichever approach we use, we can now invoke ``forward()`` on the ``Generator`` to
-map a noise sample to an image.
+ No grad students were harmed in the discovery of hyperparameters. They were
+ fed Soylent regularly.
.. note::
A brief word on the way options are passed to built-in modules like ``Conv2d``
in the C++ frontend: Every module has some required options, like the number
- of features for ``BatchNorm``. If you only need to configure the required
+ of features for ``BatchNorm2d``. If you only need to configure the required
options, you can pass them directly to the module's constructor, like
- ``BatchNorm(128)`` or ``Dropout(0.5)`` or ``Conv2d(8, 4, 2)`` (for input
+ ``BatchNorm2d(128)`` or ``Dropout(0.5)`` or ``Conv2d(8, 4, 2)`` (for input
channel count, output channel count, and kernel size). If, however, you need
- to modify other options, which are normally defaulted, such as ``with_bias``
+ to modify other options, which are normally defaulted, such as ``bias``
for ``Conv2d``, you need to construct and pass an *options* object. Every
module in the C++ frontend has an associated options struct, called
``ModuleOptions`` where ``Module`` is the name of the module, like
@@ -840,36 +804,42 @@ and activations. However, the convolutions are now regular ones instead of
transposed, and we use a leaky ReLU with an alpha value of 0.2 instead of a
vanilla ReLU. Also, the final activation becomes a Sigmoid, which squashes
values into a range between 0 and 1. We can then interpret these squashed values
-as the probabilities the discriminator assigns to images being real:
+as the probabilities the discriminator assigns to images being real.
+
+To build the discriminator, we will try something different: a ``Sequential`` module.
+Like in Python, PyTorch here provides two APIs for model definition: a functional one
+where inputs are passed through successive functions (e.g. the generator module example),
+and a more object-oriented one where we build a ``Sequential`` module containing the
+entire model as submodules. Using ``Sequential``, the discriminator would look like:
.. code-block:: cpp
nn::Sequential discriminator(
// Layer 1
nn::Conv2d(
- nn::Conv2dOptions(1, 64, 4).stride(2).padding(1).with_bias(false)),
- nn::Functional(torch::leaky_relu, 0.2),
+ nn::Conv2dOptions(1, 64, 4).stride(2).padding(1).bias(false)),
+ nn::LeakyReLU(nn::LeakyReLUOptions().negative_slope(0.2)),
// Layer 2
nn::Conv2d(
- nn::Conv2dOptions(64, 128, 4).stride(2).padding(1).with_bias(false)),
- nn::BatchNorm(128),
- nn::Functional(torch::leaky_relu, 0.2),
+ nn::Conv2dOptions(64, 128, 4).stride(2).padding(1).bias(false)),
+ nn::BatchNorm2d(128),
+ nn::LeakyReLU(nn::LeakyReLUOptions().negative_slope(0.2)),
// Layer 3
nn::Conv2d(
- nn::Conv2dOptions(128, 256, 4).stride(2).padding(1).with_bias(false)),
- nn::BatchNorm(256),
- nn::Functional(torch::leaky_relu, 0.2),
+ nn::Conv2dOptions(128, 256, 4).stride(2).padding(1).bias(false)),
+ nn::BatchNorm2d(256),
+ nn::LeakyReLU(nn::LeakyReLUOptions().negative_slope(0.2)),
// Layer 4
nn::Conv2d(
- nn::Conv2dOptions(256, 1, 3).stride(1).padding(0).with_bias(false)),
- nn::Functional(torch::sigmoid));
+ nn::Conv2dOptions(256, 1, 3).stride(1).padding(0).bias(false)),
+ nn::Sigmoid());
-.. note::
+.. tip::
+
+ A ``Sequential`` module simply performs function composition. The output of
+ the first submodule becomes the input of the second, the output of the third
+ becomes the input of the fourth and so on.
- When the function we pass to ``Functional`` takes more arguments than a single
- tensor, we can pass them to the ``Functional`` constructor, which will forward
- them to each function call. For the leaky ReLU above, this means
- ``torch::leaky_relu(previous_output_tensor, 0.2)`` is called.
Loading Data
------------
@@ -909,7 +879,7 @@ stacks them into a single tensor along the first dimension:
Note that the MNIST dataset should be located in the ``./mnist`` directory
relative to wherever you execute the training binary from. You can use `this
-script `_
+script `_
to download the MNIST dataset.
Next, we create a data loader and pass it this dataset. To make a new data
@@ -996,9 +966,9 @@ we use implement the `Adam `_ algorithm:
.. code-block:: cpp
torch::optim::Adam generator_optimizer(
- generator->parameters(), torch::optim::AdamOptions(2e-4).beta1(0.5));
+ generator->parameters(), torch::optim::AdamOptions(2e-4).betas(std::make_tuple(0.5, 0.5)));
torch::optim::Adam discriminator_optimizer(
- discriminator->parameters(), torch::optim::AdamOptions(5e-4).beta1(0.5));
+ discriminator->parameters(), torch::optim::AdamOptions(5e-4).betas(std::make_tuple(0.5, 0.5)));
.. note::
@@ -1019,7 +989,7 @@ the data loader every epoch and then write the GAN training code:
discriminator->zero_grad();
torch::Tensor real_images = batch.data;
torch::Tensor real_labels = torch::empty(batch.data.size(0)).uniform_(0.8, 1.0);
- torch::Tensor real_output = discriminator->forward(real_images);
+ torch::Tensor real_output = discriminator->forward(real_images).reshape(real_labels.sizes());
torch::Tensor d_loss_real = torch::binary_cross_entropy(real_output, real_labels);
d_loss_real.backward();
@@ -1027,7 +997,7 @@ the data loader every epoch and then write the GAN training code:
torch::Tensor noise = torch::randn({batch.data.size(0), kNoiseSize, 1, 1});
torch::Tensor fake_images = generator->forward(noise);
torch::Tensor fake_labels = torch::zeros(batch.data.size(0));
- torch::Tensor fake_output = discriminator->forward(fake_images.detach());
+ torch::Tensor fake_output = discriminator->forward(fake_images.detach()).reshape(fake_labels.sizes());
torch::Tensor d_loss_fake = torch::binary_cross_entropy(fake_output, fake_labels);
d_loss_fake.backward();
@@ -1037,7 +1007,7 @@ the data loader every epoch and then write the GAN training code:
// Train generator.
generator->zero_grad();
fake_labels.fill_(1);
- fake_output = discriminator->forward(fake_images);
+ fake_output = discriminator->forward(fake_images).reshape(fake_labels.sizes());
torch::Tensor g_loss = torch::binary_cross_entropy(fake_output, fake_labels);
g_loss.backward();
generator_optimizer.step();
@@ -1065,7 +1035,7 @@ probabilities.
is called *label smoothing*.
Before evaluating the discriminator, we zero out the gradients of its
-parameters. After computing the loss, we back-propagate through the network by
+parameters. After computing the loss, we back-propagate it through the network by
calling ``d_loss.backward()`` to compute new gradients. We repeat this spiel for
the fake images. Instead of using images from the dataset, we let the generator
create fake images for this by feeding it a batch of random noise. We then
@@ -1266,9 +1236,6 @@ tensors and display them with matplotlib:
.. code-block:: python
- from __future__ import print_function
- from __future__ import unicode_literals
-
import argparse
import matplotlib.pyplot as plt
@@ -1320,7 +1287,7 @@ Let's now train our model for around 30 epochs:
-> checkpoint 120
[30/30][938/938] D_loss: 0.3610 | G_loss: 3.8084
-And display the imags in a plot:
+And display the images in a plot:
.. code-block:: shell
diff --git a/advanced_source/custom_class_pt2.rst b/advanced_source/custom_class_pt2.rst
new file mode 100644
index 00000000000..229a94f2ce9
--- /dev/null
+++ b/advanced_source/custom_class_pt2.rst
@@ -0,0 +1,275 @@
+Supporting Custom C++ Classes in torch.compile/torch.export
+===========================================================
+
+
+This tutorial is a follow-on to the
+:doc:`custom C++ classes ` tutorial, and
+introduces additional steps that are needed to support custom C++ classes in
+torch.compile/torch.export.
+
+.. warning::
+
+ This feature is in prototype status and is subject to backwards compatibility
+ breaking changes. This tutorial provides a snapshot as of PyTorch 2.8. If
+ you run into any issues, please reach out to us on Github!
+
+Concretely, there are a few steps:
+
+1. Implement an ``__obj_flatten__`` method to the C++ custom class
+ implementation to allow us to inspect its states and guard the changes. The
+ method should return a tuple of tuple of attribute_name, value
+ (``tuple[tuple[str, value] * n]``).
+
+2. Register a python fake class using ``@torch._library.register_fake_class``
+
+ a. Implement “fake methods” of each of the class’s c++ methods, which should
+ have the same schema as the C++ implementation.
+
+ b. Additionally, implement an ``__obj_unflatten__`` classmethod in the Python
+ fake class to tell us how to create a fake class from the flattened
+ states returned by ``__obj_flatten__``.
+
+Here is a breakdown of the diff. Following the guide in
+:doc:`Extending TorchScript with Custom C++ Classes `,
+we can create a thread-safe tensor queue and build it.
+
+.. code-block:: cpp
+
+ // Thread-safe Tensor Queue
+
+ #include
+ #include
+
+ #include
+ #include
+ #include
+
+ using namespace torch::jit;
+
+ // Thread-safe Tensor Queue
+ struct TensorQueue : torch::CustomClassHolder {
+ explicit TensorQueue(at::Tensor t) : init_tensor_(t) {}
+
+ explicit TensorQueue(c10::Dict dict) {
+ init_tensor_ = dict.at(std::string("init_tensor"));
+ const std::string key = "queue";
+ at::Tensor size_tensor;
+ size_tensor = dict.at(std::string(key + "/size")).cpu();
+ const auto* size_tensor_acc = size_tensor.const_data_ptr();
+ int64_t queue_size = size_tensor_acc[0];
+
+ for (const auto index : c10::irange(queue_size)) {
+ at::Tensor val;
+ queue_[index] = dict.at(key + "/" + std::to_string(index));
+ queue_.push_back(val);
+ }
+ }
+
+ // Push the element to the rear of queue.
+ // A lock is added for thread safety.
+ void push(at::Tensor x) {
+ std::lock_guard guard(mutex_);
+ queue_.push_back(x);
+ }
+ // Pop the front element of queue and return it.
+ // If empty, return init_tensor_.
+ // A lock is added for thread safety.
+ at::Tensor pop() {
+ std::lock_guard guard(mutex_);
+ if (!queue_.empty()) {
+ auto val = queue_.front();
+ queue_.pop_front();
+ return val;
+ } else {
+ return init_tensor_;
+ }
+ }
+
+ std::vector get_raw_queue() {
+ std::vector raw_queue(queue_.begin(), queue_.end());
+ return raw_queue;
+ }
+
+ private:
+ std::deque queue_;
+ std::mutex mutex_;
+ at::Tensor init_tensor_;
+ };
+
+ // The torch binding code
+ TORCH_LIBRARY(MyCustomClass, m) {
+ m.class_("TensorQueue")
+ .def(torch::init())
+ .def("push", &TensorQueue::push)
+ .def("pop", &TensorQueue::pop)
+ .def("get_raw_queue", &TensorQueue::get_raw_queue);
+ }
+
+**Step 1**: Add an ``__obj_flatten__`` method to the C++ custom class implementation:
+
+.. code-block:: cpp
+
+ // Thread-safe Tensor Queue
+ struct TensorQueue : torch::CustomClassHolder {
+ ...
+ std::tuple>, std::tuple> __obj_flatten__() {
+ return std::tuple(std::tuple("queue", this->get_raw_queue()), std::tuple("init_tensor_", this->init_tensor_.clone()));
+ }
+ ...
+ };
+
+ TORCH_LIBRARY(MyCustomClass, m) {
+ m.class_("TensorQueue")
+ .def(torch::init())
+ ...
+ .def("__obj_flatten__", &TensorQueue::__obj_flatten__);
+ }
+
+**Step 2a**: Register a fake class in Python that implements each method.
+
+.. code-block:: python
+
+ # namespace::class_name
+ @torch._library.register_fake_class("MyCustomClass::TensorQueue")
+ class FakeTensorQueue:
+ def __init__(
+ self,
+ queue: List[torch.Tensor],
+ init_tensor_: torch.Tensor
+ ) -> None:
+ self.queue = queue
+ self.init_tensor_ = init_tensor_
+
+ def push(self, tensor: torch.Tensor) -> None:
+ self.queue.append(tensor)
+
+ def pop(self) -> torch.Tensor:
+ if len(self.queue) > 0:
+ return self.queue.pop(0)
+ return self.init_tensor_
+
+**Step 2b**: Implement an ``__obj_unflatten__`` classmethod in Python.
+
+.. code-block:: python
+
+ # namespace::class_name
+ @torch._library.register_fake_class("MyCustomClass::TensorQueue")
+ class FakeTensorQueue:
+ ...
+ @classmethod
+ def __obj_unflatten__(cls, flattened_tq):
+ return cls(**dict(flattened_tq))
+
+
+That’s it! Now we can create a module that uses this object and run it with ``torch.compile`` or ``torch.export``.
+
+.. code-block:: python
+
+ import torch
+
+ torch.classes.load_library("build/libcustom_class.so")
+ tq = torch.classes.MyCustomClass.TensorQueue(torch.empty(0).fill_(-1))
+
+ class Mod(torch.nn.Module):
+ def forward(self, tq, x):
+ tq.push(x.sin())
+ tq.push(x.cos())
+ poped_t = tq.pop()
+ assert torch.allclose(poped_t, x.sin())
+ return tq, poped_t
+
+ tq, poped_t = torch.compile(Mod(), backend="eager", fullgraph=True)(tq, torch.randn(2, 3))
+ assert tq.size() == 1
+
+ exported_program = torch.export.export(Mod(), (tq, torch.randn(2, 3),), strict=False)
+ exported_program.module()(tq, torch.randn(2, 3))
+
+We can also implement custom ops that take custom classes as inputs. For
+example, we could register a custom op ``for_each_add_(tq, tensor)``
+
+.. code-block:: cpp
+
+ struct TensorQueue : torch::CustomClassHolder {
+ ...
+ void for_each_add_(at::Tensor inc) {
+ for (auto& t : queue_) {
+ t.add_(inc);
+ }
+ }
+ ...
+ }
+
+
+ TORCH_LIBRARY_FRAGMENT(MyCustomClass, m) {
+ m.class_("TensorQueue")
+ ...
+ .def("for_each_add_", &TensorQueue::for_each_add_);
+
+ m.def(
+ "for_each_add_(__torch__.torch.classes.MyCustomClass.TensorQueue foo, Tensor inc) -> ()");
+ }
+
+ void for_each_add_(c10::intrusive_ptr tq, at::Tensor inc) {
+ tq->for_each_add_(inc);
+ }
+
+ TORCH_LIBRARY_IMPL(MyCustomClass, CPU, m) {
+ m.impl("for_each_add_", for_each_add_);
+ }
+
+
+Since the fake class is implemented in Python, the fake implementation of the
+custom op must also be registered in Python:
+
+.. code-block:: python
+
+ @torch.library.register_fake("MyCustomClass::for_each_add_")
+ def fake_for_each_add_(tq, inc):
+ tq.for_each_add_(inc)
+
+After re-compilation, we can export the custom op with:
+
+.. code-block:: python
+
+ class ForEachAdd(torch.nn.Module):
+ def forward(self, tq: torch.ScriptObject, a: torch.Tensor) -> torch.ScriptObject:
+ torch.ops.MyCustomClass.for_each_add_(tq, a)
+ return tq
+
+ mod = ForEachAdd()
+ tq = empty_tensor_queue()
+ qlen = 10
+ for i in range(qlen):
+ tq.push(torch.zeros(1))
+
+ ep = torch.export.export(mod, (tq, torch.ones(1)), strict=False)
+
+Why do we need to make a Fake Class?
+------------------------------------
+
+Tracing with a real custom object has several major downsides:
+
+1. Operators on real objects can be time consuming e.g. the custom object
+ might be reading from the network or loading data from the disk.
+
+2. We don’t want to mutate the real custom object or create side-effects to the environment while tracing.
+
+3. It cannot support dynamic shapes.
+
+However, it may be difficult for users to write a fake class, e.g. if the
+original class uses some third-party library that determines the output shape of
+the methods, or is complicated and written by others. In such cases, users can
+disable the fakification requirement by defining a ``tracing_mode`` method to
+return ``"real"``:
+
+.. code-block:: cpp
+
+ std::string tracing_mode() {
+ return "real";
+ }
+
+
+A caveat of fakification is regarding **tensor aliasing.** We assume that no
+tensors within a torchbind object aliases a tensor outside of the torchbind
+object. Therefore, mutating one of these tensors will result in undefined
+behavior.
diff --git a/advanced_source/custom_classes.rst b/advanced_source/custom_classes.rst
new file mode 100644
index 00000000000..014bac2eebf
--- /dev/null
+++ b/advanced_source/custom_classes.rst
@@ -0,0 +1,231 @@
+Extending PyTorch with Custom C++ Classes
+===============================================
+
+
+This tutorial introduces an API for binding C++ classes into PyTorch.
+The API is very similar to
+`pybind11 `_, and most of the concepts will transfer
+over if you're familiar with that system.
+
+Implementing and Binding the Class in C++
+-----------------------------------------
+
+For this tutorial, we are going to define a simple C++ class that maintains persistent
+state in a member variable.
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/class.cpp
+ :language: cpp
+ :start-after: BEGIN class
+ :end-before: END class
+
+There are several things to note:
+
+- ``torch/custom_class.h`` is the header you need to include to extend PyTorch
+ with your custom class.
+- Notice that whenever we are working with instances of the custom
+ class, we do it via instances of ``c10::intrusive_ptr<>``. Think of ``intrusive_ptr``
+ as a smart pointer like ``std::shared_ptr``, but the reference count is stored
+ directly in the object, as opposed to a separate metadata block (as is done in
+ ``std::shared_ptr``). ``torch::Tensor`` internally uses the same pointer type;
+ and custom classes have to also use this pointer type so that we can
+ consistently manage different object types.
+- The second thing to notice is that the user-defined class must inherit from
+ ``torch::CustomClassHolder``. This ensures that the custom class has space to
+ store the reference count.
+
+Now let's take a look at how we will make this class visible to PyTorch, a process called
+*binding* the class:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/class.cpp
+ :language: cpp
+ :start-after: BEGIN binding
+ :end-before: END binding
+ :append:
+ ;
+ }
+
+
+
+Building the Example as a C++ Project With CMake
+------------------------------------------------
+
+Now, we're going to build the above C++ code with the `CMake
+`_ build system. First, take all the C++ code
+we've covered so far and place it in a file called ``class.cpp``.
+Then, write a simple ``CMakeLists.txt`` file and place it in the
+same directory. Here is what ``CMakeLists.txt`` should look like:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/CMakeLists.txt
+ :language: cmake
+
+Also, create a ``build`` directory. Your file tree should look like this::
+
+ custom_class_project/
+ class.cpp
+ CMakeLists.txt
+ build/
+
+Go ahead and invoke cmake and then make to build the project:
+
+.. code-block:: shell
+
+ $ cd build
+ $ cmake -DCMAKE_PREFIX_PATH="$(python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)')" ..
+ -- The C compiler identification is GNU 7.3.1
+ -- The CXX compiler identification is GNU 7.3.1
+ -- Check for working C compiler: /opt/rh/devtoolset-7/root/usr/bin/cc
+ -- Check for working C compiler: /opt/rh/devtoolset-7/root/usr/bin/cc -- works
+ -- Detecting C compiler ABI info
+ -- Detecting C compiler ABI info - done
+ -- Detecting C compile features
+ -- Detecting C compile features - done
+ -- Check for working CXX compiler: /opt/rh/devtoolset-7/root/usr/bin/c++
+ -- Check for working CXX compiler: /opt/rh/devtoolset-7/root/usr/bin/c++ -- works
+ -- Detecting CXX compiler ABI info
+ -- Detecting CXX compiler ABI info - done
+ -- Detecting CXX compile features
+ -- Detecting CXX compile features - done
+ -- Looking for pthread.h
+ -- Looking for pthread.h - found
+ -- Looking for pthread_create
+ -- Looking for pthread_create - not found
+ -- Looking for pthread_create in pthreads
+ -- Looking for pthread_create in pthreads - not found
+ -- Looking for pthread_create in pthread
+ -- Looking for pthread_create in pthread - found
+ -- Found Threads: TRUE
+ -- Found torch: /torchbind_tutorial/libtorch/lib/libtorch.so
+ -- Configuring done
+ -- Generating done
+ -- Build files have been written to: /torchbind_tutorial/build
+ $ make -j
+ Scanning dependencies of target custom_class
+ [ 50%] Building CXX object CMakeFiles/custom_class.dir/class.cpp.o
+ [100%] Linking CXX shared library libcustom_class.so
+ [100%] Built target custom_class
+
+What you'll find is there is now (among other things) a dynamic library
+file present in the build directory. On Linux, this is probably named
+``libcustom_class.so``. So the file tree should look like::
+
+ custom_class_project/
+ class.cpp
+ CMakeLists.txt
+ build/
+ libcustom_class.so
+
+Using the C++ Class from Python
+-----------------------------------------------
+
+Now that we have our class and its registration compiled into an ``.so`` file,
+we can load that `.so` into Python and try it out. Here's a script that
+demonstrates that:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/custom_test.py
+ :language: python
+
+
+Defining Serialization/Deserialization Methods for Custom C++ Classes
+---------------------------------------------------------------------
+
+If you try to save a ``ScriptModule`` with a custom-bound C++ class as
+an attribute, you'll get the following error:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/export_attr.py
+ :language: python
+
+.. code-block:: shell
+
+ $ python export_attr.py
+ RuntimeError: Cannot serialize custom bound C++ class __torch__.torch.classes.my_classes.MyStackClass. Please define serialization methods via def_pickle for this class. (pushIValueImpl at ../torch/csrc/jit/pickler.cpp:128)
+
+This is because PyTorch cannot automatically figure out what information to
+save from your C++ class. You must specify that manually. The way to do that
+is to define ``__getstate__`` and ``__setstate__`` methods on the class using
+the special ``def_pickle`` method on ``class_``.
+
+.. note::
+ The semantics of ``__getstate__`` and ``__setstate__`` are
+ equivalent to that of the Python pickle module. You can
+ `read more `_
+ about how we use these methods.
+
+Here is an example of the ``def_pickle`` call we can add to the registration of
+``MyStackClass`` to include serialization methods:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/class.cpp
+ :language: cpp
+ :start-after: BEGIN def_pickle
+ :end-before: END def_pickle
+
+.. note::
+ We take a different approach from pybind11 in the pickle API. Whereas pybind11
+ has a special function ``pybind11::pickle()`` which you pass into ``class_::def()``,
+ we have a separate method ``def_pickle`` for this purpose. This is because the
+ name ``torch::jit::pickle`` was already taken, and we didn't want to cause confusion.
+
+Once we have defined the (de)serialization behavior in this way, our script can
+now run successfully:
+
+.. code-block:: shell
+
+ $ python ../export_attr.py
+ testing
+
+Defining Custom Operators that Take or Return Bound C++ Classes
+---------------------------------------------------------------
+
+Once you've defined a custom C++ class, you can also use that class
+as an argument or return from a custom operator (i.e. free functions). Suppose
+you have the following free function:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/class.cpp
+ :language: cpp
+ :start-after: BEGIN free_function
+ :end-before: END free_function
+
+You can register it running the following code inside your ``TORCH_LIBRARY``
+block:
+
+.. literalinclude:: ../advanced_source/custom_classes/custom_class_project/class.cpp
+ :language: cpp
+ :start-after: BEGIN def_free
+ :end-before: END def_free
+
+Once this is done, you can use the op like the following example:
+
+.. code-block:: python
+
+ class TryCustomOp(torch.nn.Module):
+ def __init__(self):
+ super(TryCustomOp, self).__init__()
+ self.f = torch.classes.my_classes.MyStackClass(["foo", "bar"])
+
+ def forward(self):
+ return torch.ops.my_classes.manipulate_instance(self.f)
+
+.. note::
+
+ Registration of an operator that takes a C++ class as an argument requires that
+ the custom class has already been registered. You can enforce this by
+ making sure the custom class registration and your free function definitions
+ are in the same ``TORCH_LIBRARY`` block, and that the custom class
+ registration comes first. In the future, we may relax this requirement,
+ so that these can be registered in any order.
+
+
+Conclusion
+----------
+
+This tutorial walked you through how to expose a C++ class to PyTorch, how to
+register its methods, how to use that class from Python, and how to save and
+load code using the class and run that code in a standalone C++ process. You
+are now ready to extend your PyTorch models with C++ classes that interface
+with third party C++ libraries or implement any other use case that requires
+the lines between Python and C++ to blend smoothly.
+
+As always, if you run into any problems or have questions, you can use our
+`forum `_ or `GitHub issues
+`_ to get in touch. Also, our
+`frequently asked questions (FAQ) page
+`_ may have helpful information.
diff --git a/advanced_source/custom_classes/CMakeLists.txt b/advanced_source/custom_classes/CMakeLists.txt
new file mode 100644
index 00000000000..6a1eb3e87fa
--- /dev/null
+++ b/advanced_source/custom_classes/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+project(infer)
+
+find_package(Torch REQUIRED)
+
+add_subdirectory(custom_class_project)
+
+# Define our library target
+add_executable(infer infer.cpp)
+set(CMAKE_CXX_STANDARD 14)
+# Link against LibTorch
+target_link_libraries(infer "${TORCH_LIBRARIES}")
+# This is where we link in our libcustom_class code, making our
+# custom class available in our binary.
+target_link_libraries(infer -Wl,--no-as-needed custom_class)
diff --git a/advanced_source/custom_classes/custom_class_project/CMakeLists.txt b/advanced_source/custom_classes/custom_class_project/CMakeLists.txt
new file mode 100644
index 00000000000..bb3d41aa997
--- /dev/null
+++ b/advanced_source/custom_classes/custom_class_project/CMakeLists.txt
@@ -0,0 +1,10 @@
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+project(custom_class)
+
+find_package(Torch REQUIRED)
+
+# Define our library target
+add_library(custom_class SHARED class.cpp)
+set(CMAKE_CXX_STANDARD 14)
+# Link against LibTorch
+target_link_libraries(custom_class "${TORCH_LIBRARIES}")
diff --git a/advanced_source/custom_classes/custom_class_project/class.cpp b/advanced_source/custom_classes/custom_class_project/class.cpp
new file mode 100644
index 00000000000..dc89a3ecb2e
--- /dev/null
+++ b/advanced_source/custom_classes/custom_class_project/class.cpp
@@ -0,0 +1,132 @@
+// BEGIN class
+// This header is all you need to do the C++ portions of this
+// tutorial
+#include <torch/script.h>
+// This header is what defines the custom class registration
+// behavior specifically. script.h already includes this, but
+// we include it here so you know it exists in case you want
+// to look at the API or implementation.
+#include <torch/custom_class.h>
+
+#include <string>
+#include <vector>
+
+template <class T>
+struct MyStackClass : torch::CustomClassHolder {
+ std::vector<T> stack_;
+ MyStackClass(std::vector<T> init) : stack_(init.begin(), init.end()) {}
+
+ void push(T x) {
+ stack_.push_back(x);
+ }
+ T pop() {
+ auto val = stack_.back();
+ stack_.pop_back();
+ return val;
+ }
+
+ c10::intrusive_ptr<MyStackClass> clone() const {
+ return c10::make_intrusive<MyStackClass>(stack_);
+ }
+
+ void merge(const c10::intrusive_ptr