Date: Mon, 8 May 2023 04:16:44 GMT
From: Yuri Victorovich <yuri@FreeBSD.org>
To: ports-committers@FreeBSD.org, dev-commits-ports-all@FreeBSD.org, dev-commits-ports-main@FreeBSD.org
Subject: git: df996f881153 - main - misc/py-pytorch: New port: PyTorch: Tensors and dynamic neural networks in Python
Message-ID: <202305080416.3484GiWq036980@gitrepo.freebsd.org>

The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=df996f881153e573aa8d0f90ac93eaf59e134ed5

commit df996f881153e573aa8d0f90ac93eaf59e134ed5
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2023-05-08 04:03:49 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2023-05-08 04:16:34 +0000

    misc/py-pytorch: New port: PyTorch: Tensors and dynamic neural networks in Python
---
 misc/Makefile                                      |  1 +
 misc/py-pytorch/Makefile                           | 57 ++++++++++++++++
 misc/py-pytorch/distinfo                           |  3 +
 misc/py-pytorch/files/patch-CMakeLists.txt         | 39 +++++++++++
 ...aten_src_ATen_cpu_vec_vec256_vec256__bfloat16.h | 11 +++
 ...aten_src_ATen_cpu_vec_vec512_vec512__bfloat16.h | 11 +++
 ...native_sparse_ValidateCompressedIndicesCommon.h | 78 ++++++++++++++++++++++
 .../files/patch-cmake_Dependencies.cmake           | 11 +++
 .../files/patch-cmake_public_mkldnn.cmake          | 11 +++
 .../patch-third__party_cpuinfo_CMakeLists.txt      | 56 ++++++++++++++++
 ...third__party_asmjit_src_asmjit_core_virtmem.cpp | 10 +++
 ...hird__party_kineto_libkineto_src_ThreadUtil.cpp | 11 +++
 misc/py-pytorch/pkg-descr                          |  3 +
 13 files changed, 302 insertions(+)

diff --git a/misc/Makefile b/misc/Makefile
index 7d8034c8307b..6cc050e71a62 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -450,6 +450,7 @@
     SUBDIR += py-pyprind
     SUBDIR += py-python-geohash
     SUBDIR += py-python-utils
+    SUBDIR += py-pytorch
     SUBDIR += py-qiskit-machine-learning
     SUBDIR += py-scikit-fusion
     SUBDIR += py-serverfiles
diff --git a/misc/py-pytorch/Makefile b/misc/py-pytorch/Makefile
new file mode 100644
index 000000000000..67e667515367
--- /dev/null
+++ b/misc/py-pytorch/Makefile
@@ -0,0 +1,57 @@
+PORTNAME=	pytorch
+DISTVERSIONPREFIX=	v
+DISTVERSION=	2.0.0
+CATEGORIES=	misc # machine-learning
+MASTER_SITES=	https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
+PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
+DIST_SUBDIR=	${PORTNAME}
+
+MAINTAINER=	yuri@FreeBSD.org
+COMMENT=	PyTorch: Tensors and dynamic neural networks in Python
+WWW=		https://pytorch.org/
+
+LICENSE=	BSD3CLAUSE
+LICENSE_FILE=	${WRKSRC}/LICENSE
+
+BUILD_DEPENDS=	cmake:devel/cmake-core \
+		gmake:devel/gmake \
+		pybind11>0:devel/pybind11 \
+		${LOCALBASE}/include/fxdiv.h:devel/fxdiv \
+		${PYTHON_PKGNAMEPREFIX}typing-extensions>0:devel/py-typing-extensions@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}yaml>0:devel/py-yaml@${PY_FLAVOR}
+LIB_DEPENDS=	libopenblas.so:math/openblas \
+		libmpi.so:net/openmpi \
+		libonnx.so:misc/onnx \
+		libpthreadpool.so:devel/pthreadpool \
+		libprotobuf.so:devel/protobuf \
+		libsleef.so:math/sleef
+RUN_DEPENDS=	${PYTHON_PKGNAMEPREFIX}filelock>0:sysutils/py-filelock@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}Jinja2>=0:devel/py-Jinja2@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}networkx>0:math/py-networkx@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}sympy>0:math/py-sympy@${PY_FLAVOR} \
+		${PYTHON_PKGNAMEPREFIX}typing-extensions>0:devel/py-typing-extensions@${PY_FLAVOR}
+RUN_DEPENDS+=	${PYTHON_PKGNAMEPREFIX}dill>0:devel/py-dill@${PY_FLAVOR} # optional dependency
+
+USES=		compiler:c++14-lang localbase:ldflags python
+USE_PYTHON=	distutils autoplist
+
+MAKE_ENV=	USE_NINJA=no # ninja breaks for some reason
+MAKE_ENV+=	BUILD_TEST=0 # ninja breaks for some reason
+LDFLAGS+=	-lexecinfo
+
+BINARY_ALIAS=	make=${GMAKE}
+
+POST_PLIST=	fix-plist
+
+post-install: # strip binaries
+	@${STRIP_CMD} \
+		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/bin/torch_shm_manager \
+		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/_C${PYTHON_EXT_SUFFIX}.so \
+		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/_C_flatbuffer${PYTHON_EXT_SUFFIX}.so \
+		${STAGEDIR}${PYTHON_SITELIBDIR}/functorch/_C${PYTHON_EXT_SUFFIX}.so \
+		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/lib/lib*.so
+
+fix-plist: # remove the stray %%PYTHON_SITELIBDIR%%/caffe2 file
+	@${REINPLACE_CMD} -e "s|.*/caffe2$$||" ${TMPPLIST}
+
+.include <bsd.port.mk>
diff --git a/misc/py-pytorch/distinfo b/misc/py-pytorch/distinfo
new file mode 100644
index 000000000000..7deec686ed14
--- /dev/null
+++ b/misc/py-pytorch/distinfo
@@ -0,0 +1,3 @@
+TIMESTAMP = 1683446868
+SHA256 (pytorch/pytorch-v2.0.0.tar.gz) = cecc38b6d4256b810336edfc6119d7a57b701fdf1ba43c50001f31e2724fd8e2
+SIZE (pytorch/pytorch-v2.0.0.tar.gz) = 276643781
diff --git a/misc/py-pytorch/files/patch-CMakeLists.txt b/misc/py-pytorch/files/patch-CMakeLists.txt
new file mode 100644
index 000000000000..9ebebccbf2e3
--- /dev/null
+++ b/misc/py-pytorch/files/patch-CMakeLists.txt
@@ -0,0 +1,39 @@
+--- CMakeLists.txt.orig	2023-04-03 19:45:59 UTC
++++ CMakeLists.txt
+@@ -138,7 +138,7 @@ endif()
+ set(CPU_AARCH64 OFF)
+ set(CPU_INTEL OFF)
+ 
+-if(CMAKE_SYSTEM_PROCESSOR MATCHES "(AMD64|x86_64)")
++if(CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64|x86_64)")
+   set(CPU_INTEL ON)
+ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)")
+   set(CPU_AARCH64 ON)
+@@ -163,7 +163,7 @@ include(CMakeDependentOption)
+ option(ATEN_NO_TEST "Do not build ATen test binaries" OFF)
+ option(BUILD_BINARY "Build C++ binaries" OFF)
+ option(BUILD_DOCS "Build Caffe2 documentation" OFF)
+-option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" ON)
++option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" OFF)
+ option(BUILD_PYTHON "Build Python binaries" ON)
+ option(BUILD_CAFFE2 "Master flag to build Caffe2" OFF)
+ option(BUILD_LITE_INTERPRETER "Master flag to build Lite Interpreter" OFF)
+@@ -398,15 +398,15 @@ endif()
+ # USE_SYSTEM_LIBS being "OFF".
+ option(USE_SYSTEM_LIBS "Use all available system-provided libraries." OFF)
+ option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo." OFF)
+-option(USE_SYSTEM_SLEEF "Use system-provided sleef." OFF)
++option(USE_SYSTEM_SLEEF "Use system-provided sleef." ON)
+ option(USE_SYSTEM_GLOO "Use system-provided gloo." OFF)
+ option(USE_SYSTEM_FP16 "Use system-provided fp16." OFF)
+-option(USE_SYSTEM_PYBIND11 "Use system-provided PyBind11." OFF)
++option(USE_SYSTEM_PYBIND11 "Use system-provided PyBind11." ON)
+ option(USE_SYSTEM_PTHREADPOOL "Use system-provided pthreadpool." OFF)
+ option(USE_SYSTEM_PSIMD "Use system-provided psimd." OFF)
+ option(USE_SYSTEM_FXDIV "Use system-provided fxdiv." OFF)
+ option(USE_SYSTEM_BENCHMARK "Use system-provided google benchmark." OFF)
+-option(USE_SYSTEM_ONNX "Use system-provided onnx." OFF)
++option(USE_SYSTEM_ONNX "Use system-provided onnx." ON)
+ option(USE_SYSTEM_XNNPACK "Use system-provided xnnpack." OFF)
+ option(USE_GOLD_LINKER "Use ld.gold to link" OFF)
+ if(USE_SYSTEM_LIBS)
diff --git a/misc/py-pytorch/files/patch-aten_src_ATen_cpu_vec_vec256_vec256__bfloat16.h b/misc/py-pytorch/files/patch-aten_src_ATen_cpu_vec_vec256_vec256__bfloat16.h
new file mode 100644
index 000000000000..1fb789eebb7b
--- /dev/null
+++ b/misc/py-pytorch/files/patch-aten_src_ATen_cpu_vec_vec256_vec256__bfloat16.h
@@ -0,0 +1,11 @@
+--- aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h.orig	2023-05-07 16:59:15 UTC
++++ aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h
+@@ -206,7 +206,7 @@ template <> class Vectorized<BFloat16> { (public)
+     }
+     return b;
+   }
+-  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
++  Vectorized<BFloat16> map(__m256 (*const vop)(__m256)) const {
+     __m256 lo, hi;
+     cvtbf16_fp32(values, lo, hi);
+     const auto o1 = vop(lo);
diff --git a/misc/py-pytorch/files/patch-aten_src_ATen_cpu_vec_vec512_vec512__bfloat16.h b/misc/py-pytorch/files/patch-aten_src_ATen_cpu_vec_vec512_vec512__bfloat16.h
new file mode 100644
index 000000000000..59081cf2f2b0
--- /dev/null
+++ b/misc/py-pytorch/files/patch-aten_src_ATen_cpu_vec_vec512_vec512__bfloat16.h
@@ -0,0 +1,11 @@
+--- aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h.orig	2023-05-07 17:07:36 UTC
++++ aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h
+@@ -283,7 +283,7 @@ template <> class Vectorized<BFloat16> { (public)
+   }
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wignored-qualifiers"
+-  Vectorized<BFloat16> map(const __m512 (*const vop)(__m512)) const {
++  Vectorized<BFloat16> map(__m512 (*const vop)(__m512)) const {
+     __m512 lo, hi;
+     cvtbf16_fp32(values, lo, hi);
+     const auto o1 = vop(lo);
diff --git a/misc/py-pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h b/misc/py-pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h
new file mode 100644
index 000000000000..70dc6fbd61e2
--- /dev/null
+++ b/misc/py-pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h
@@ -0,0 +1,78 @@
+--- aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h.orig	2023-05-07 08:51:40 UTC
++++ aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
+@@ -39,7 +39,7 @@ namespace {
+ // use `cidx/idx` to refer to `compressed_indices/plain_indices` respectively.
+ 
+ INVARIANT_CHECK_FUNC_API
+-_assert(const bool cond, const char* const message) {
++__assert(const bool cond, const char* const message) {
+ #ifdef GPUCC
+   CUDA_KERNEL_ASSERT(cond && message);
+ #else
+@@ -57,9 +57,9 @@ INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
+     const index_t& zero) {
+   const bool invariant = cidx == zero;
+   if (cdim_name == CDimName::CRow) {
+-    _assert(invariant, "`crow_indices[..., 0] == 0` is not satisfied.");
++    __assert(invariant, "`crow_indices[..., 0] == 0` is not satisfied.");
+   } else {
+-    _assert(invariant, "`ccol_indices[..., 0] == 0` is not satisfied.");
++    __assert(invariant, "`ccol_indices[..., 0] == 0` is not satisfied.");
+   }
+ }
+ 
+@@ -71,9 +71,9 @@ INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
+     const index_t& nnz) {
+   const bool invariant = cidx == nnz;
+   if (cdim_name == CDimName::CRow) {
+-    _assert(invariant, "`crow_indices[..., -1] == nnz` is not satisfied.");
++    __assert(invariant, "`crow_indices[..., -1] == nnz` is not satisfied.");
+   } else {
+-    _assert(invariant, "`ccol_indices[..., -1] == nnz` is not satisfied.");
++    __assert(invariant, "`ccol_indices[..., -1] == nnz` is not satisfied.");
+   }
+ }
+ 
+@@ -88,11 +88,11 @@ INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_loc
+   const auto s_cidx = cidx_next - cidx;
+   const bool invariant = zero <= s_cidx && s_cidx <= dim;
+   if (cdim_name == CDimName::CRow) {
+-    _assert(
++    __assert(
+         invariant,
+         "`0 <= crow_indices[..., 1:] - crow_indices[..., :-1] <= ncols` is not satisfied.");
+   } else {
+-    _assert(
++    __assert(
+         invariant,
+         "`0 <= ccol_indices[..., 1:] - ccol_indices[..., :-1] <= nrows` is not satisfied.");
+   }
+@@ -107,9 +107,9 @@ INVARIANT_CHECK_FUNC_API _check_idx_bounds(
+     const index_t& dim) {
+   const bool invariant = zero <= idx && idx < dim;
+   if (cdim_name == CDimName::CRow) {
+-    _assert(invariant, "`0 <= col_indices < ncols` is not satisfied.");
++    __assert(invariant, "`0 <= col_indices < ncols` is not satisfied.");
+   } else {
+-    _assert(invariant, "`0 <= row_indices < nrows` is not satisfied.");
++    __assert(invariant, "`0 <= row_indices < nrows` is not satisfied.");
+   }
+ }
+ 
+@@ -128,14 +128,14 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_va
+   for (auto* RESTRICT curr = slice_begin + 1; curr < slice_end; ++curr) {
+     const auto invariant = *(curr - 1) < *curr;
+     if (cdim_name == CDimName::CRow) {
+-      _assert(
++      __assert(
+           invariant,
+           "`col_indices[..., crow_indices[..., i - 1]:crow_indices[..., i]] "
+           "for all i = 1, ..., nrows "
+           "are sorted and distinct along the last dimension values` "
+           "is not satisfied.");
+     } else {
+-      _assert(
++      __assert(
+           invariant,
+           "`row_indices[..., ccol_indices[..., i - 1]:ccol_indices[..., i]] "
+           "for all i = 1, ..., ncols "
diff --git a/misc/py-pytorch/files/patch-cmake_Dependencies.cmake b/misc/py-pytorch/files/patch-cmake_Dependencies.cmake
new file mode 100644
index 000000000000..165534d123bd
--- /dev/null
+++ b/misc/py-pytorch/files/patch-cmake_Dependencies.cmake
@@ -0,0 +1,11 @@
+--- cmake/Dependencies.cmake.orig	2022-12-16 00:23:46 UTC
++++ cmake/Dependencies.cmake
+@@ -339,7 +339,7 @@ if(USE_NNPACK OR USE_QNNPACK OR USE_PYTORCH_QNNPACK OR
+     set(DISABLE_NNPACK_AND_FAMILY ON)
+   endif()
+ else()
+-  if(NOT IOS AND NOT (CMAKE_SYSTEM_NAME MATCHES "^(Android|Linux|Darwin|Windows)$"))
++  if(NOT IOS AND NOT (CMAKE_SYSTEM_NAME MATCHES "^(Android|Linux|FreeBSD|Darwin|Windows)$"))
+     message(WARNING
+       "Target platform \"${CMAKE_SYSTEM_NAME}\" is not supported in {Q/X}NNPACK. "
+       "Supported platforms are Android, iOS, Linux, and macOS. "
diff --git a/misc/py-pytorch/files/patch-cmake_public_mkldnn.cmake b/misc/py-pytorch/files/patch-cmake_public_mkldnn.cmake
new file mode 100644
index 000000000000..77b3923f4e2b
--- /dev/null
+++ b/misc/py-pytorch/files/patch-cmake_public_mkldnn.cmake
@@ -0,0 +1,11 @@
+--- cmake/public/mkldnn.cmake.orig	2022-12-16 00:23:46 UTC
++++ cmake/public/mkldnn.cmake
+@@ -4,7 +4,7 @@ if(CPU_AARCH64)
+   include(${CMAKE_CURRENT_LIST_DIR}/ComputeLibrary.cmake)
+ endif()
+ 
+-find_package(MKLDNN QUIET)
++find_package(MKLDNN REQUIRED)
+ 
+ if(NOT TARGET caffe2::mkldnn)
+   add_library(caffe2::mkldnn INTERFACE IMPORTED)
diff --git a/misc/py-pytorch/files/patch-third__party_cpuinfo_CMakeLists.txt b/misc/py-pytorch/files/patch-third__party_cpuinfo_CMakeLists.txt
new file mode 100644
index 000000000000..2b01f115b89e
--- /dev/null
+++ b/misc/py-pytorch/files/patch-third__party_cpuinfo_CMakeLists.txt
@@ -0,0 +1,56 @@
+--- third_party/cpuinfo/CMakeLists.txt.orig	2023-04-03 19:46:00 UTC
++++ third_party/cpuinfo/CMakeLists.txt
+@@ -65,7 +65,7 @@ IF(NOT CMAKE_SYSTEM_PROCESSOR)
+       "cpuinfo will compile, but cpuinfo_initialize() will always fail.")
+     SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
+   ENDIF()
+-ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?|armv[5-8].*|aarch64|arm64|ARM64)$")
++ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?|armv[5-8].*|aarch64|arm64|ARM64)$")
+   MESSAGE(WARNING
+     "Target processor architecture \"${CPUINFO_TARGET_PROCESSOR}\" is not supported in cpuinfo. "
+     "cpuinfo will compile, but cpuinfo_initialize() will always fail.")
+@@ -77,7 +77,7 @@ IF(NOT CMAKE_SYSTEM_NAME)
+     "Target operating system is not specified. "
+     "cpuinfo will compile, but cpuinfo_initialize() will always fail.")
+   SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
+-ELSEIF(NOT CMAKE_SYSTEM_NAME MATCHES "^(Windows|CYGWIN|MSYS|Darwin|Linux|Android)$")
++ELSEIF(NOT CMAKE_SYSTEM_NAME MATCHES "^(Windows|CYGWIN|MSYS|Darwin|Linux|FreeBSD|Android)$")
+   IF(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14" AND NOT CMAKE_SYSTEM_NAME STREQUAL "iOS")
+     MESSAGE(WARNING
+       "Target operating system \"${CMAKE_SYSTEM_NAME}\" is not supported in cpuinfo. "
+@@ -123,7 +123,7 @@ SET(CPUINFO_SRCS
+   src/cache.c)
+ 
+ IF(CPUINFO_SUPPORTED_PLATFORM)
+-  IF(NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND (CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$" OR IOS_ARCH MATCHES "^(i386|x86_64)$"))
++  IF(NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND (CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$" OR IOS_ARCH MATCHES "^(i386|x86_64)$"))
+     LIST(APPEND CPUINFO_SRCS
+       src/x86/init.c
+       src/x86/info.c
+@@ -324,7 +324,7 @@ ENDIF()
+ # ---[ cpuinfo mock library and mock tests
+ IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_MOCK_TESTS)
+   SET(CPUINFO_MOCK_SRCS "${CPUINFO_SRCS}")
+-  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
++  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$")
+     LIST(APPEND CPUINFO_MOCK_SRCS src/x86/mockcpuid.c)
+   ENDIF()
+   IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
+@@ -768,7 +768,7 @@ IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_UNIT_T
+     ADD_TEST(NAME get-current-test COMMAND get-current-test)
+   ENDIF()
+ 
+-  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
++  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$")
+     ADD_EXECUTABLE(brand-string-test test/name/brand-string.cc)
+     CPUINFO_TARGET_ENABLE_CXX11(brand-string-test)
+     CPUINFO_TARGET_RUNTIME_LIBRARY(brand-string-test)
+@@ -835,7 +835,7 @@ IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_TOOLS)
+     CPUINFO_TARGET_RUNTIME_LIBRARY(cpuinfo-dump)
+   ENDIF()
+ 
+-  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
++  IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$")
+     ADD_EXECUTABLE(cpuid-dump tools/cpuid-dump.c)
+     CPUINFO_TARGET_ENABLE_C99(cpuid-dump)
+     CPUINFO_TARGET_RUNTIME_LIBRARY(cpuid-dump)
diff --git a/misc/py-pytorch/files/patch-third__party_fbgemm_third__party_asmjit_src_asmjit_core_virtmem.cpp b/misc/py-pytorch/files/patch-third__party_fbgemm_third__party_asmjit_src_asmjit_core_virtmem.cpp
new file mode 100644
index 000000000000..09a801db1dfc
--- /dev/null
+++ b/misc/py-pytorch/files/patch-third__party_fbgemm_third__party_asmjit_src_asmjit_core_virtmem.cpp
@@ -0,0 +1,10 @@
+--- third_party/fbgemm/third_party/asmjit/src/asmjit/core/virtmem.cpp.orig	2022-12-16 00:23:48 UTC
++++ third_party/fbgemm/third_party/asmjit/src/asmjit/core/virtmem.cpp
+@@ -45,6 +45,7 @@
+ #endif
+ 
+ #include <atomic>
++#include <sys/stat.h>
+ 
+ #if defined(__APPLE__) || defined(__BIONIC__)
+   #define ASMJIT_VM_SHM_DETECT 0
diff --git a/misc/py-pytorch/files/patch-third__party_kineto_libkineto_src_ThreadUtil.cpp b/misc/py-pytorch/files/patch-third__party_kineto_libkineto_src_ThreadUtil.cpp
new file mode 100644
index 000000000000..f014a26e4f20
--- /dev/null
+++ b/misc/py-pytorch/files/patch-third__party_kineto_libkineto_src_ThreadUtil.cpp
@@ -0,0 +1,11 @@
+--- third_party/kineto/libkineto/src/ThreadUtil.cpp.orig	2023-04-03 19:46:02 UTC
++++ third_party/kineto/libkineto/src/ThreadUtil.cpp
+@@ -57,7 +57,7 @@ int32_t systemThreadId() {
+ #elif defined _MSC_VER
+     _sysTid = (int32_t)GetCurrentThreadId();
+ #else
+-    _sysTid = (int32_t)syscall(SYS_gettid);
++    _sysTid = (int32_t)syscall(SYS_getpid);
+ #endif
+   }
+   return _sysTid;
diff --git a/misc/py-pytorch/pkg-descr b/misc/py-pytorch/pkg-descr
new file mode 100644
index 000000000000..6378f7a68ec8
--- /dev/null
+++ b/misc/py-pytorch/pkg-descr
@@ -0,0 +1,3 @@
+PyTorch is a Python package that provides two high-level features:
+* Tensor computation (like NumPy) with strong GPU acceleration
+* Deep neural networks built on a tape-based autograd system
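
For context, the two pkg-descr features above correspond to a Python session like the
one below. This is a minimal illustrative sketch, not part of the commit; it assumes
the port is installed and `import torch` succeeds.

    import torch

    # Tensor computation (NumPy-like), tracked by the tape-based autograd engine.
    x = torch.randn(3, 3, requires_grad=True)  # random 3x3 tensor, gradients enabled
    y = (x * x).sum()                          # elementwise square reduced to a scalar
    y.backward()                               # replay the recorded tape in reverse
    print(torch.allclose(x.grad, 2 * x))       # d(sum(x^2))/dx = 2x, so this prints True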