
Commit c9db0c5

Update
[ghstack-poisoned]
2 parents: fd80d32 + 01a0044

463 files changed (+6185 / −5105 lines)

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-ca4783992ed7602a39528ba304d61f00396b2a5a
+16b633b4daa7f3d3442be62a3589bd60b2f7fdc7

.ci/docker/libtorch/Dockerfile

Lines changed: 5 additions & 0 deletions
@@ -66,6 +66,11 @@ RUN bash ./install_cuda.sh 12.4
 RUN bash ./install_magma.sh 12.4
 RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda
 
+FROM cuda as cuda12.6
+RUN bash ./install_cuda.sh 12.6
+RUN bash ./install_magma.sh 12.6
+RUN ln -sf /usr/local/cuda-12.6 /usr/local/cuda
+
 FROM cpu as rocm
 ARG PYTORCH_ROCM_ARCH
 ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
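The added `cuda12.6` stage mirrors the existing CUDA 12.4 stage line for line (CUDA installer, Magma, and the `/usr/local/cuda` symlink). Presumably the image build scripts select it with Docker's `--target` option (e.g. `docker build --target cuda12.6 ...`); that invocation is not part of this diff.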

.ci/docker/requirements-ci.txt

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ tb-nightly==2.13.0a20230426
 #test that import:
 
 # needed by torchgen utils
-typing-extensions
+typing-extensions>=4.10.0
 #Description: type hints for python
 #Pinned versions:
 #test that import:
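The only functional change here is a lower bound on typing-extensions. The diff does not say which feature motivated the 4.10.0 floor, so the sketch below is only an illustration of the kind of API that newer typing-extensions releases provide (for example `TypeIs` from PEP 742, which to my knowledge first shipped in the 4.10 series); whether torchgen actually relies on it is an assumption.

```python
# Illustrative sketch only, not taken from torchgen: code like this fails to
# import on older typing-extensions releases, which is why a ">=4.10.0"-style
# floor in requirements-ci.txt matters.
from typing_extensions import TypeIs  # needs a recent typing-extensions


def is_str_list(values: list[object]) -> TypeIs[list[str]]:
    """Tell the type checker that `values` is list[str] when this returns True."""
    return all(isinstance(v, str) for v in values)


items: list[object] = ["ci", "commit", "pins"]
if is_str_list(items):
    # The type checker now narrows `items` to list[str].
    print(", ".join(items))
```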

.github/ci_commit_pins/audio.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-79047bf6bdec9e32c4cffd0f9835b347781fefbf
+fa44bdab1fe49bab58389e7b6a33061ffced9bc7

.github/workflows/build-libtorch-images.yml

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ jobs:
     runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
     strategy:
       matrix:
-        cuda_version: ["12.4", "12.1", "11.8"]
+        cuda_version: ["12.6", "12.4", "12.1", "11.8"]
     env:
       GPU_ARCH_TYPE: cuda
       GPU_ARCH_VERSION: ${{ matrix.cuda_version }}

.github/workflows/build-manywheel-images.yml

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ jobs:
     runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
     strategy:
       matrix:
-        cuda_version: ["12.4", "12.1", "11.8"]
+        cuda_version: ["12.6", "12.4", "12.1", "11.8"]
     env:
       GPU_ARCH_TYPE: cuda
      GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
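Both image-build workflows add "12.6" to their `cuda_version` matrix, so the libtorch and manywheel Docker images now get built for CUDA 12.6 as well, matching the new `cuda12.6` stage added to the libtorch Dockerfile above; the corresponding manywheel Docker setup is presumably updated elsewhere among this commit's 463 files.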

.github/workflows/pull.yml

Lines changed: 20 additions & 16 deletions
@@ -54,10 +54,11 @@ jobs:
       docker-image-name: pytorch-linux-jammy-py3.9-gcc11
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
-          { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
-          { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
-          { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "docs_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "backwards_compat", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
@@ -186,10 +187,11 @@
       docker-image-name: pytorch-linux-focal-py3.9-clang10
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
           { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
@@ -218,10 +220,11 @@
       docker-image-name: pytorch-linux-focal-py3.11-clang10
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
           { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
@@ -252,10 +255,11 @@
       docker-image-name: pytorch-linux-focal-py3.12-clang10
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
-          { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+          { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },

.lintrunner.toml

Lines changed: 5 additions & 5 deletions
@@ -214,10 +214,6 @@ include_patterns = [
     'torch/csrc/*.cpp',
     'torch/csrc/**/*.h',
     'torch/csrc/**/*.cpp',
-    'torch/csrc/distributed/autograd/**/*.cpp',
-    'torch/csrc/distributed/autograd/**/*.h',
-    'torch/csrc/distributed/rpc/**/*.cpp',
-    'torch/csrc/distributed/rpc/**/*.h',
     'torch/csrc/jit/serialization/*.h',
     'torch/csrc/jit/serialization/*.cpp',
 ]
@@ -245,7 +241,11 @@ exclude_patterns = [
     'torch/csrc/api/include/torch/linalg.h',
     'torch/csrc/api/include/torch/nn/pimpl-inl.h',
     'torch/csrc/autograd/generated/**',
-    'torch/csrc/distributed/**/*',
+    'torch/csrc/distributed/**/*.cu',
+    'torch/csrc/distributed/c10d/CUDASymmetricMemory-inl.h',
+    'torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp',
+    'torch/csrc/distributed/c10d/WinSockUtils.hpp',
+    'torch/csrc/distributed/c10d/quantization/quantization_gpu.h',
     'torch/csrc/dynamo/eval_frame.h',
     'torch/csrc/inductor/aoti_torch/c/shim.h',
     'torch/csrc/jit/**/*',
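Taken together, the two `.lintrunner.toml` hunks narrow the blanket `torch/csrc/distributed/**/*` exclusion down to a handful of specific files, so most distributed C++ sources now fall under the generic `torch/csrc/**/*.h` / `torch/csrc/**/*.cpp` include patterns; the dedicated distributed include entries become redundant, which is presumably why they are removed.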

CONTRIBUTING.md

Lines changed: 5 additions & 0 deletions
@@ -286,6 +286,11 @@ The following packages should be installed with either `conda` or `pip`:
 - `expecttest` and `hypothesis` - required to run tests
 - `mypy` - recommended for linting
 - `pytest` - recommended to run tests more selectively
+Running
+```
+pip install -r requirements
+```
+will install these dependencies for you.
 
 All PyTorch test suites are located in the `test` folder and start with
 `test_`. Run the entire test
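As rendered here the added snippet reads `pip install -r requirements`; PyTorch's top-level requirements file is `requirements.txt`, so `pip install -r requirements.txt` is likely the intended command.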

aten/src/ATen/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ if(NOT BUILD_LITE_INTERPRETER)
 endif()
 EXCLUDE(ATen_CORE_SRCS "${ATen_CORE_SRCS}" ${ATen_CORE_TEST_SRCS})
 
-file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/vec256/vsx/*.h" "cpu/vec/vec256/zarch/*.h" "cpu/vec/sve/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
+file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec128/*.h" "cpu/vec/vec256/*.h" "cpu/vec/vec256/vsx/*.h" "cpu/vec/vec256/zarch/*.h" "cpu/vec/sve/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
 file(GLOB base_cpp "*.cpp" "detail/*.cpp" "cpu/*.cpp" "functorch/*.cpp")
 file(GLOB cuda_h "cuda/*.h" "cuda/detail/*.h" "cuda/*.cuh" "cuda/detail/*.cuh" "cuda/tunable/*.cuh" "cuda/tunable/*.h")
 file(GLOB cuda_cpp "cuda/*.cpp" "cuda/detail/*.cpp" "cuda/tunable/*.cpp")
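The only change is the extra `"cpu/vec/vec128/*.h"` entry in the `base_h` glob, so headers under the new `cpu/vec/vec128/` directory are picked up alongside the existing vec256/vec512 ones; presumably these back 128-bit vectorized kernels (e.g. ARM NEON), though the diff itself only shows the glob.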
