Skip site navigation (1)Skip section navigation (2)
Date:      Sat, 18 Apr 2026 01:38:20 +0000
From:      Yuri Victorovich <yuri@FreeBSD.org>
To:        ports-committers@FreeBSD.org, dev-commits-ports-all@FreeBSD.org, dev-commits-ports-main@FreeBSD.org
Subject:   git: 0d79a064273a - main - misc/py-pytorch: update 2.10.0 → 2.11.0
Message-ID:  <69e2e08c.4577f.7d22215c@gitrepo.freebsd.org>

index | next in thread | raw e-mail

The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=0d79a064273aa3b8c6ab70464c9c9937d2318cb3

commit 0d79a064273aa3b8c6ab70464c9c9937d2318cb3
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2026-04-18 01:35:13 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2026-04-18 01:38:08 +0000

    misc/py-pytorch: update 2.10.0 → 2.11.0
    
    PR:             294601
    Reported by:    Robert Cina <transitive@gmail.com>
---
 misc/py-pytorch/Makefile                           |  7 ++-
 misc/py-pytorch/distinfo                           | 10 ++--
 misc/py-pytorch/files/patch-CMakeLists.txt         |  4 +-
 ...patch-aten_src_ATen_native_vulkan_ops_Clamp.cpp | 11 ++++
 misc/py-pytorch/files/patch-c10_core_DynamicCast.h |  4 +-
 .../files/patch-cmake_Dependencies.cmake           |  4 +-
 misc/py-pytorch/files/patch-pyproject.toml         | 13 +++--
 misc/py-pytorch/files/patch-requirements-build.txt |  8 +--
 misc/py-pytorch/files/patch-setup.py               |  9 ++--
 misc/py-pytorch/files/patch-test_run__test.py      | 59 ++++++++++++++++++++++
 .../patch-third__party_fmt_include_fmt_format.h    |  4 +-
 .../files/patch-tools_build__pytorch__libs.py      |  4 +-
 ...orch_csrc_distributed_c10d_ProcessGroupGloo.cpp |  6 +--
 .../patch-torch_csrc_distributed_c10d_init.cpp     | 11 ++++
 .../files/patch-torch_csrc_jit_python_init.cpp     | 20 ++++++++
 .../patch-torch_csrc_utils_python__arg__parser.cpp | 17 +++++++
 misc/py-pytorch/files/patch-torch_jit___trace.py   | 16 ++++++
 misc/py-pytorch/pkg-descr                          |  9 +++-
 18 files changed, 178 insertions(+), 38 deletions(-)

diff --git a/misc/py-pytorch/Makefile b/misc/py-pytorch/Makefile
index 79784241e790..0a8ea89d294c 100644
--- a/misc/py-pytorch/Makefile
+++ b/misc/py-pytorch/Makefile
@@ -1,7 +1,6 @@
 PORTNAME=	pytorch
 DISTVERSIONPREFIX=	v
-DISTVERSION=	2.10.0
-PORTREVISION=	4
+DISTVERSION=	2.11.0
 CATEGORIES=	misc # machine-learning
 MASTER_SITES=	https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
 PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
@@ -66,7 +65,7 @@ PYDISTUTILS_PKGNAME=	torch
 PEP517_INSTALL_CMD=	${PYTHON_CMD} -m installer --destdir ${STAGEDIR} --prefix ${PREFIX} ${BUILD_WRKSRC}/dist/torch-${DISTVERSION}*.whl
 
 USE_GITHUB=	nodefault
-GH_TUPLE=	pytorch:cpuinfo:1e83a2f:cpuinfo/cpuinfo-with-freebsd-support # https://github.com/pytorch/cpuinfo/pull/230/commits
+GH_TUPLE=	pytorch:cpuinfo:bc3c01e:cpuinfo/cpuinfo-with-freebsd-support # https://github.com/pytorch/cpuinfo/pull/230/commits
 
 MAKE_ENV=	USE_NINJA=no # ninja breaks for some reason
 MAKE_ENV+=	BUILD_TEST=0 # ninja breaks for some reason
@@ -128,6 +127,6 @@ fix-plist: # remove the stray %%PYTHON_SITELIBDIR%%/caffe2 file
 	@${REINPLACE_CMD} -e "s|.*/caffe2$$||" ${TMPPLIST}
 
 do-test:
-	cd ${TEST_WRKSRC} && ${SETENV} ${TEST_ENV} ${PYTHON_CMD} run_test.py
+	@cd ${TEST_WRKSRC} && ${SETENV} ${TEST_ENV} ${PYTHON_CMD} run_test.py
 
 .include <bsd.port.mk>
diff --git a/misc/py-pytorch/distinfo b/misc/py-pytorch/distinfo
index 3a750c8759a0..ce70ed2d40ad 100644
--- a/misc/py-pytorch/distinfo
+++ b/misc/py-pytorch/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1769236907
-SHA256 (pytorch/pytorch-v2.10.0.tar.gz) = fa8ccbe87f83f48735505371c1c313b4aa6db400b0ae4f8a02844d1e150c695f
-SIZE (pytorch/pytorch-v2.10.0.tar.gz) = 372567293
-SHA256 (pytorch/pytorch-cpuinfo-1e83a2f_GH0.tar.gz) = b8945cf45d4f13054d7f52e8bd9314690ea840d4d0ff21febd95ff04de3c613e
-SIZE (pytorch/pytorch-cpuinfo-1e83a2f_GH0.tar.gz) = 3541839
+TIMESTAMP = 1776459266
+SHA256 (pytorch/pytorch-v2.11.0.tar.gz) = ab3fde9e7e382f45ac942be6ea2c2ef362c5ccd6f55ed6d5f35e6ea81d3ab88e
+SIZE (pytorch/pytorch-v2.11.0.tar.gz) = 421160531
+SHA256 (pytorch/pytorch-cpuinfo-bc3c01e_GH0.tar.gz) = 09459e830a588fee730feb19d8a32fedf2d36d48db7bd9b18c3159c7e74ea0ce
+SIZE (pytorch/pytorch-cpuinfo-bc3c01e_GH0.tar.gz) = 3547453
diff --git a/misc/py-pytorch/files/patch-CMakeLists.txt b/misc/py-pytorch/files/patch-CMakeLists.txt
index 4493ea8e5155..42c8f2d0d781 100644
--- a/misc/py-pytorch/files/patch-CMakeLists.txt
+++ b/misc/py-pytorch/files/patch-CMakeLists.txt
@@ -1,4 +1,4 @@
---- CMakeLists.txt.orig	2026-01-21 17:08:59 UTC
+--- CMakeLists.txt.orig	2026-03-23 18:40:42 UTC
 +++ CMakeLists.txt
 @@ -173,7 +173,7 @@ set(CPU_RISCV OFF)
  set(CPU_POWER OFF)
@@ -27,7 +27,7 @@
  option(USE_MAGMA "Use MAGMA" ON)
  option(USE_PYTORCH_METAL "Use Metal for PyTorch iOS build" OFF)
  option(USE_PYTORCH_METAL_EXPORT "Export Metal models on MacOSX desktop" OFF)
-@@ -476,15 +476,15 @@ option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo
+@@ -481,15 +481,15 @@ option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo
  # USE_SYSTEM_LIBS being "OFF".
  option(USE_SYSTEM_LIBS "Use all available system-provided libraries." OFF)
  option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo." OFF)
diff --git a/misc/py-pytorch/files/patch-aten_src_ATen_native_vulkan_ops_Clamp.cpp b/misc/py-pytorch/files/patch-aten_src_ATen_native_vulkan_ops_Clamp.cpp
new file mode 100644
index 000000000000..3e1ef08cca8a
--- /dev/null
+++ b/misc/py-pytorch/files/patch-aten_src_ATen_native_vulkan_ops_Clamp.cpp
@@ -0,0 +1,11 @@
+--- aten/src/ATen/native/vulkan/ops/Clamp.cpp.orig	2026-04-17 23:38:43 UTC
++++ aten/src/ATen/native/vulkan/ops/Clamp.cpp
+@@ -601,7 +601,7 @@ TORCH_LIBRARY_IMPL(aten, Vulkan, m) {
+   m.impl(TORCH_SELECTIVE_NAME("aten::hardsigmoid"), hardsigmoid);
+   m.impl(TORCH_SELECTIVE_NAME("aten::hardsigmoid_"), hardsigmoid_);
+   m.impl(TORCH_SELECTIVE_NAME("aten::hardshrink"), hardshrink);
+-  m.impl(TORCH_SELECTIVE_NAME("aten::hardshrink_"), hardshrink_);
++  // aten::hardshrink_ has no schema in native_functions.yaml
+   m.impl(TORCH_SELECTIVE_NAME("aten::hardswish"), hardswish);
+   m.impl(TORCH_SELECTIVE_NAME("aten::hardswish_"), hardswish_);
+   m.impl(TORCH_SELECTIVE_NAME("aten::hardtanh"), hardtanh);
diff --git a/misc/py-pytorch/files/patch-c10_core_DynamicCast.h b/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
index f3f9af294a30..a39f3dc08dcd 100644
--- a/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
+++ b/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
@@ -2,7 +2,7 @@
 - /usr/local/lib/python3.9/site-packages/torch/include/c10/core/DynamicCast.h:112:22: error: use of undeclared identifier '__assert_fail'
 - see https://github.com/pytorch/pytorch/issues/113941
 
---- c10/core/DynamicCast.h.orig	2024-07-24 18:41:35 UTC
+--- c10/core/DynamicCast.h.orig	2026-03-23 18:40:42 UTC
 +++ c10/core/DynamicCast.h
 @@ -54,7 +54,7 @@ namespace c10 {
  //
@@ -13,7 +13,7 @@
  #else
  #define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
  #endif
-@@ -105,13 +105,13 @@ C10_HOST_DEVICE inline void cast_and_store(
+@@ -109,13 +109,13 @@ C10_HOST_DEVICE inline void cast_and_store(
    template <>                                                 \
    C10_HOST_DEVICE inline T fetch_and_cast<T>(                 \
        const ScalarType src_type, const void* ptr) {           \
diff --git a/misc/py-pytorch/files/patch-cmake_Dependencies.cmake b/misc/py-pytorch/files/patch-cmake_Dependencies.cmake
index d97ba9f5940e..0366113842b8 100644
--- a/misc/py-pytorch/files/patch-cmake_Dependencies.cmake
+++ b/misc/py-pytorch/files/patch-cmake_Dependencies.cmake
@@ -1,6 +1,6 @@
---- cmake/Dependencies.cmake.orig	2024-07-24 18:41:35 UTC
+--- cmake/Dependencies.cmake.orig	2026-03-23 18:40:42 UTC
 +++ cmake/Dependencies.cmake
-@@ -304,7 +304,7 @@ if(USE_NNPACK OR USE_PYTORCH_QNNPACK OR USE_XNNPACK)
+@@ -348,7 +348,7 @@ if(USE_NNPACK OR USE_PYTORCH_QNNPACK OR USE_XNNPACK)
        set(DISABLE_NNPACK_AND_FAMILY ON)
      endif()
    else()
diff --git a/misc/py-pytorch/files/patch-pyproject.toml b/misc/py-pytorch/files/patch-pyproject.toml
index b22842375119..43b400577cf1 100644
--- a/misc/py-pytorch/files/patch-pyproject.toml
+++ b/misc/py-pytorch/files/patch-pyproject.toml
@@ -1,21 +1,20 @@
---- pyproject.toml.orig	2026-01-24 08:31:03 UTC
+--- pyproject.toml.orig	2026-03-23 18:40:42 UTC
 +++ pyproject.toml
-@@ -4,9 +4,11 @@ requires = [
+@@ -4,9 +4,10 @@ requires = [
  requires = [
      # 70.1.0: min version for integrated bdist_wheel command from wheel package
      # 77.0.0: min version for SPDX expression support for project.license
--    "setuptools>=70.1.0",
+-    "setuptools>=70.1.0,<82",
 -    "cmake>=3.27",
 -    "ninja",
 +    # FreeBSD: patched to work with setuptools-63.1.0
 +    "setuptools>=63.1.0",
-+    # FreeBSD: cmake and ninja are provided as BUILD_DEPENDS
-+    # "cmake>=3.27",
-+    # "ninja",
++    #"cmake>=3.27",
++    #"ninja",
      "numpy",
      "packaging",
      "pyyaml",
-@@ -22,9 +24,11 @@ dev = [
+@@ -22,9 +23,11 @@ dev = [
      # in PyTorch root until the project fully migrates to pyproject.toml
      # after which this can be removed as it is already specified in the
      # [build-system] section
diff --git a/misc/py-pytorch/files/patch-requirements-build.txt b/misc/py-pytorch/files/patch-requirements-build.txt
index 580e6e40d6af..7fcb95756d47 100644
--- a/misc/py-pytorch/files/patch-requirements-build.txt
+++ b/misc/py-pytorch/files/patch-requirements-build.txt
@@ -1,15 +1,15 @@
---- requirements-build.txt.orig	2026-01-24 08:31:03 UTC
+--- requirements-build.txt.orig	2026-03-23 18:40:42 UTC
 +++ requirements-build.txt
 @@ -1,7 +1,9 @@
  # Build System requirements
--setuptools>=70.1.0
+-setuptools>=70.1.0,<82
 -cmake>=3.27
 -ninja
 +# FreeBSD: patched to work with setuptools-63.1.0
 +setuptools>=63.1.0
 +# FreeBSD: cmake and ninja are provided as BUILD_DEPENDS
-+# cmake>=3.27
-+# ninja
++#cmake>=3.27
++#ninja
  numpy
  packaging
  pyyaml
diff --git a/misc/py-pytorch/files/patch-setup.py b/misc/py-pytorch/files/patch-setup.py
index 47ad8e6a118b..b4e96e80818d 100644
--- a/misc/py-pytorch/files/patch-setup.py
+++ b/misc/py-pytorch/files/patch-setup.py
@@ -1,4 +1,4 @@
---- setup.py.orig	2026-01-24 08:17:11 UTC
+--- setup.py.orig	2026-03-23 18:40:42 UTC
 +++ setup.py
 @@ -286,7 +286,7 @@ from typing import Any, ClassVar, IO
  from pathlib import Path
@@ -9,7 +9,7 @@
  import setuptools.command.build_ext
  import setuptools.command.sdist
  import setuptools.errors
-@@ -1429,29 +1429,37 @@ class concat_license_files:
+@@ -1435,30 +1435,37 @@ class concat_license_files:
          self.f1.write_text(self.bsd_text, encoding="utf-8")
  
  
@@ -38,7 +38,8 @@
 +            super().write_wheelfile(*args, **kwargs)
  
 -        if BUILD_LIBTORCH_WHL:
--            assert self.bdist_dir is not None
+-            if self.bdist_dir is None:
+-                raise AssertionError("self.bdist_dir must not be None")
 -            bdist_dir = Path(self.bdist_dir)
 -            # Remove extraneneous files in the libtorch wheel
 -            for file in itertools.chain(
@@ -67,7 +68,7 @@
  
  
  class clean(Command):
-@@ -1640,11 +1648,12 @@ def configure_extension_build() -> tuple[
+@@ -1647,11 +1654,12 @@ def configure_extension_build() -> tuple[
      ext_modules.append(C)
  
      cmdclass = {
diff --git a/misc/py-pytorch/files/patch-test_run__test.py b/misc/py-pytorch/files/patch-test_run__test.py
new file mode 100644
index 000000000000..4887f9c5e63e
--- /dev/null
+++ b/misc/py-pytorch/files/patch-test_run__test.py
@@ -0,0 +1,59 @@
+--- test/run_test.py.orig	2026-04-17 23:39:17 UTC
++++ test/run_test.py
+@@ -1244,21 +1244,10 @@ def get_pytest_args(options, is_cpp_test=False, is_dis
+ 
+ 
+ def get_pytest_args(options, is_cpp_test=False, is_distributed_test=False):
+-    if is_distributed_test:
+-        # Distributed tests do not support rerun, see https://github.com/pytorch/pytorch/issues/162978
+-        rerun_options = ["-x", "--reruns=0"]
+-    elif RERUN_DISABLED_TESTS:
+-        # ASAN tests are too slow, so running them x50 will cause the jobs to timeout after
+-        # 3+ hours. So, let's opt for less number of reruns. We need at least 150 instances of the
+-        # test every 2 weeks to satisfy the SQL query (15 x 14 = 210).
+-        count = 15 if TEST_WITH_ASAN else 50
+-        # When under rerun-disabled-tests mode, run the same tests multiple times to determine their
+-        # flakiness status. Default to 50 re-runs
+-        rerun_options = ["--flake-finder", f"--flake-runs={count}"]
++    if is_distributed_test or RERUN_DISABLED_TESTS:
++        rerun_options = ["-x"]
+     else:
+-        # When under the normal mode, retry a failed test 2 more times. -x means stop at the first
+-        # failure
+-        rerun_options = ["-x", "--reruns=2"]
++        rerun_options = ["-x"]
+ 
+     pytest_args = [
+         "-vv",
+@@ -1277,7 +1266,7 @@ def get_pytest_args(options, is_cpp_test=False, is_dis
+             # Add the option to generate XML test report here as C++ tests
+             # won't go into common_utils
+             test_report_path = get_report_path(pytest=True)
+-            pytest_args.extend(["--junit-xml-reruns", test_report_path])
++            pytest_args.extend(["--junit-xml", test_report_path])
+ 
+     if options.pytest_k_expr:
+         pytest_args.extend(["-k", options.pytest_k_expr])
+@@ -1812,6 +1801,13 @@ def get_selected_tests(options) -> list[str]:
+             "Skip distributed tests on s390x",
+         )
+ 
++    if sys.platform.startswith("freebsd"):
++        selected_tests = exclude_tests(
++            DISTRIBUTED_TESTS,
++            selected_tests,
++            "Skip distributed tests on FreeBSD (gloo unsupported)",
++        )
++
+     # skip all distributed tests if distributed package is not available.
+     if not dist.is_available():
+         selected_tests = exclude_tests(
+@@ -2100,8 +2096,6 @@ def main():
+ 
+ 
+ def main():
+-    check_pip_packages()
+-
+     options = parse_args()
+     tests_to_include_env = os.environ.get("TESTS_TO_INCLUDE", "").strip()
+     if tests_to_include_env:
diff --git a/misc/py-pytorch/files/patch-third__party_fmt_include_fmt_format.h b/misc/py-pytorch/files/patch-third__party_fmt_include_fmt_format.h
index 05f39aeb7558..28246dbb90da 100644
--- a/misc/py-pytorch/files/patch-third__party_fmt_include_fmt_format.h
+++ b/misc/py-pytorch/files/patch-third__party_fmt_include_fmt_format.h
@@ -1,6 +1,6 @@
---- third_party/fmt/include/fmt/format.h.orig	2025-08-06 17:08:03 UTC
+--- third_party/fmt/include/fmt/format.h.orig	2026-03-23 18:40:46 UTC
 +++ third_party/fmt/include/fmt/format.h
-@@ -44,6 +44,7 @@
+@@ -52,6 +52,7 @@
  #  include <cmath>    // std::signbit
  #  include <cstddef>  // std::byte
  #  include <cstdint>  // uint32_t
diff --git a/misc/py-pytorch/files/patch-tools_build__pytorch__libs.py b/misc/py-pytorch/files/patch-tools_build__pytorch__libs.py
index 7a421c1bd8e3..26247f2b2442 100644
--- a/misc/py-pytorch/files/patch-tools_build__pytorch__libs.py
+++ b/misc/py-pytorch/files/patch-tools_build__pytorch__libs.py
@@ -1,6 +1,6 @@
---- tools/build_pytorch_libs.py.orig	2025-08-06 17:08:06 UTC
+--- tools/build_pytorch_libs.py.orig	2026-03-23 18:40:49 UTC
 +++ tools/build_pytorch_libs.py
-@@ -91,7 +91,8 @@ def build_pytorch(
+@@ -93,7 +93,8 @@ def build_pytorch(
          and not check_negative_env_flag("USE_NCCL")
          and not check_env_flag("USE_SYSTEM_NCCL")
      ):
diff --git a/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_ProcessGroupGloo.cpp b/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_ProcessGroupGloo.cpp
index 88578933f045..f62711c019bb 100644
--- a/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_ProcessGroupGloo.cpp
+++ b/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_ProcessGroupGloo.cpp
@@ -1,6 +1,6 @@
---- torch/csrc/distributed/c10d/ProcessGroupGloo.cpp.orig	2026-01-21 17:09:02 UTC
+--- torch/csrc/distributed/c10d/ProcessGroupGloo.cpp.orig	2026-03-23 18:40:49 UTC
 +++ torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
-@@ -32,6 +32,10 @@
+@@ -33,6 +33,10 @@
  #include <gloo/rendezvous/context.h>
  #include <gloo/rendezvous/prefix_store.h>
  
@@ -11,7 +11,7 @@
  namespace c10d {
  
  namespace {
-@@ -488,7 +492,7 @@ std::shared_ptr<::gloo::transport::Device> ProcessGrou
+@@ -501,7 +505,7 @@ std::shared_ptr<::gloo::transport::Device> ProcessGrou
    return ::c10d::GlooDeviceFactory::makeDeviceForHostname(hostname, lazyInit);
  }
  
diff --git a/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_init.cpp b/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_init.cpp
new file mode 100644
index 000000000000..3e479bf2fb93
--- /dev/null
+++ b/misc/py-pytorch/files/patch-torch_csrc_distributed_c10d_init.cpp
@@ -0,0 +1,11 @@
+--- torch/csrc/distributed/c10d/init.cpp.orig	2026-04-17 22:57:32 UTC
++++ torch/csrc/distributed/c10d/init.cpp
+@@ -887,7 +887,7 @@ This class does not support ``__members__`` property.)
+             return ::c10d::ReduceOp(self);
+           })
+       .def(py::pickle(
+-          [](const ::c10d::ReduceOp& r) {
++          [](const ::c10d::ReduceOp& r) -> py::tuple {
+             // __getstate__
+             if (r.op_ != ::c10d::ReduceOp::RedOpType::PREMUL_SUM) {
+               return py::make_tuple(r.op_, py::none());
diff --git a/misc/py-pytorch/files/patch-torch_csrc_jit_python_init.cpp b/misc/py-pytorch/files/patch-torch_csrc_jit_python_init.cpp
new file mode 100644
index 000000000000..13a3e60871c8
--- /dev/null
+++ b/misc/py-pytorch/files/patch-torch_csrc_jit_python_init.cpp
@@ -0,0 +1,20 @@
+--- torch/csrc/jit/python/init.cpp.orig	2026-04-17 22:48:51 UTC
++++ torch/csrc/jit/python/init.cpp
+@@ -1796,7 +1796,7 @@ void initJITBindings(PyObject* module) {
+ 
+   m.def(
+       "_jit_get_operation",
+-      [](const std::string& op_name) {
++      [](const std::string& op_name) -> py::tuple {
+         try {
+           auto symbol = Symbol::fromQualString(op_name);
+           const auto sortedOps = getAllSortedOperatorsFor(symbol);
+@@ -1843,7 +1843,7 @@ void initJITBindings(PyObject* module) {
+       "_maybe_call_torch_function_for_op_packet",
+       [](py::handle op_overload_packet,
+          const py::args& args,
+-         const py::kwargs& kwargs) {
++         const py::kwargs& kwargs) -> py::tuple {
+         py::list ns_method =
+             op_overload_packet.attr("_qualified_op_name").attr("split")("::");
+         auto res = _maybe_handle_torch_function(
diff --git a/misc/py-pytorch/files/patch-torch_csrc_utils_python__arg__parser.cpp b/misc/py-pytorch/files/patch-torch_csrc_utils_python__arg__parser.cpp
new file mode 100644
index 000000000000..9ca1521de078
--- /dev/null
+++ b/misc/py-pytorch/files/patch-torch_csrc_utils_python__arg__parser.cpp
@@ -0,0 +1,17 @@
+--- torch/csrc/utils/python_arg_parser.cpp.orig	2026-04-17 22:45:38 UTC
++++ torch/csrc/utils/python_arg_parser.cpp
+@@ -755,9 +755,11 @@ auto handle_torch_function_indexing(
+   }
+   py::object func =
+       PyObject_FastGetAttrString(THPVariableClass, (char*)func_name);
+-  py::object args = (val == nullptr)
+-      ? py::make_tuple(py::handle(self), py::handle(index))
+-      : py::make_tuple(py::handle(self), py::handle(index), py::handle(val));
++  py::tuple args;
++  if (val == nullptr)
++    args = py::make_tuple(py::handle(self), py::handle(index));
++  else
++    args = py::make_tuple(py::handle(self), py::handle(index), py::handle(val));
+   return handle_torch_function_no_python_arg_parser(
+       overridable_args,
+       args.ptr(),
diff --git a/misc/py-pytorch/files/patch-torch_jit___trace.py b/misc/py-pytorch/files/patch-torch_jit___trace.py
new file mode 100644
index 000000000000..5415c1466564
--- /dev/null
+++ b/misc/py-pytorch/files/patch-torch_jit___trace.py
@@ -0,0 +1,16 @@
+--- torch/jit/_trace.py.orig	2026-04-18 00:49:58 UTC
++++ torch/jit/_trace.py
+@@ -445,8 +445,11 @@ def _check_trace(
+                     if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
+                         continue
+ 
+-                    mod_tensor_val = n_mod.t("value")
+-                    check_tensor_val = n_check.t("value")
++                    try:
++                        mod_tensor_val = n_mod.t("value")
++                        check_tensor_val = n_check.t("value")
++                    except RuntimeError:
++                        continue
+ 
+                     try:
+                         torch.testing.assert_close(
diff --git a/misc/py-pytorch/pkg-descr b/misc/py-pytorch/pkg-descr
index 6378f7a68ec8..e92c7595f414 100644
--- a/misc/py-pytorch/pkg-descr
+++ b/misc/py-pytorch/pkg-descr
@@ -1,3 +1,10 @@
-PyTorch is a Python package that provides two high-level features:
+PyTorch is an open-source machine learning library for Python, based on the
+Torch library, used for applications such as natural language processing.
+It is primarily developed by Facebook's AI Research lab.
+
+PyTorch provides two high-level features:
 * Tensor computation (like NumPy) with strong GPU acceleration
 * Deep neural networks built on a tape-based autograd system
+
+It is a popular choice for deep learning research and production due to its
+flexibility and ease of use.


home | help

Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?69e2e08c.4577f.7d22215c>