author:    Alfredo Tupone <tupone@gentoo.org>  2024-11-14 10:53:46 +0100
committer: Alfredo Tupone <tupone@gentoo.org>  2024-11-14 10:56:12 +0100
commit:    123bc9bdc2cc52cb0d56a13d6d5f2c711e7c688f
tree:      fdecd0d236db1b074c11691a70cce18942e0f03b /sci-libs
parent:    dev-lang/R: add 4.4.2
sci-libs/caffe2: add 2.5.1
Signed-off-by: Alfredo Tupone <tupone@gentoo.org>
Diffstat (limited to 'sci-libs')
-rw-r--r--  sci-libs/caffe2/Manifest                                        |   1 +
-rw-r--r--  sci-libs/caffe2/caffe2-2.5.1.ebuild                             | 305 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-cpp-httplib.patch            |  13 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-cudnn_include_fix.patch      |  11 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-exclude-aotriton.patch       |  22 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-fix-functorch-install.patch  |   8 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-gentoo.patch                 | 127 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-glog-0.6.0.patch             |  29 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-install-dirs.patch           |  11 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-unbundle_fmt.patch           |  10 +
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.5.1-unbundle_kineto.patch        |  22 +
11 files changed, 559 insertions(+), 0 deletions(-)
diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 1bdb2764edd1..7f0b51661b1b 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
 DIST caffe2-patches-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
 DIST pytorch-2.4.0.tar.gz 115031093 BLAKE2B d206477963977011627df284efa01482fbf57e9fcb5f58f51d679c742b8e5dde6aa6affd8745ab817fcd09477d129a81e74e07be576b5d3585eaca1c735b8e01 SHA512 804d25944035f33de6591fd942fbda44d3de037717a4397d38a97474b01775d30eaf93d16dd708a832c0119050d24d73b90990fd3e3773be79d26ada25244d22
 DIST pytorch-2.4.1.tar.gz 115029469 BLAKE2B c2909ff27d527bc57cba56b780d3b8cd07a043ab045caa6c6b27857a16f9ad10aaab2116b26226b1e46ee08ffb44007965d914464418e4ae14ca48c3f3f383bb SHA512 7e9b4485e242eaf0d648765c6621d73d95e7107b766646a098175436d1ab2e2b864badd0757a3bab6b7c318233f2120bad9ac07b39bb9e357897919580c87631
+DIST pytorch-2.5.1.tar.gz 116091366 BLAKE2B 7838b17562b94ffc7d798031348689db607dd5eae2a3c35be365972e2b52a2c1b12067068d5aca5ab00cf0977d9c2c3c9ae5337d69534c864c732e6256cbeef6 SHA512 a913a466324a65fa3d79c5e9ad4d605fc7976f0134fda2f81aaa3cea29d56926604999b8a238759646d211e63b47bbb446cdffa86ca8defd8159f11e30301289
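Portage verifies the size, BLAKE2B, and SHA512 digests in the Manifest entry above before unpacking the distfile. As a sketch of where such an entry comes from (a typical maintainer step, not part of this commit; the repository path is an assumption):

    # Hypothetical workflow: fetch the new distfile and regenerate the
    # Manifest digests for it (assumes a checkout of the main Gentoo tree).
    cd /var/db/repos/gentoo/sci-libs/caffe2
    ebuild caffe2-2.5.1.ebuild manifest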
diff --git a/sci-libs/caffe2/caffe2-2.5.1.ebuild b/sci-libs/caffe2/caffe2-2.5.1.ebuild
new file mode 100644
index 000000000000..81e3a916f3b3
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.5.1.ebuild
@@ -0,0 +1,305 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{10..12} )
+ROCM_VERSION=6.1
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz"
+
+S="${WORKDIR}"/${MYP}
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas opencl openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	mpi? ( distributed )
+	gloo? ( distributed )
+	?? ( cuda rocm )
+	rocm? (
+		|| ( ${ROCM_REQUIRED_USE} )
+		!flash
+	)
+"
+
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/abseil-cpp:=
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-cpp/opentelemetry-cpp
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef[cpu_flags_x86_avx512f(+),cpu_flags_x86_avx(+)]
+	dev-libs/sleef[cpu_flags_x86_sse3(+),cpu_flags_x86_ssse3(+)]
+	dev-libs/sleef[cpu_flags_x86_sse4_1(+),cpu_flags_x86_sse4_2(+)]
+	virtual/lapack
+	sci-libs/onnx
+	sci-libs/foxi
+	cuda? (
+		dev-libs/cudnn
+		>=dev-libs/cudnn-frontend-1.0.3:0/8
+		<dev-util/nvidia-cuda-toolkit-12.5:=[profiler]
+	)
+	fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
+	gloo? ( sci-libs/gloo[cuda?] )
+	mpi? ( virtual/mpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+	') )
+	onednn? ( dev-libs/oneDNN )
+	opencl? ( virtual/opencl )
+	qnnpack? (
+		!sci-libs/QNNPACK
+		dev-cpp/gemmlowp
+	)
+	rocm? (
+		=dev-util/hip-6.1*
+		=dev-libs/rccl-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
+		=sci-libs/miopen-6.1*[${ROCM_USEDEP}]
+		=dev-util/roctracer-6.1*[${ROCM_USEDEP}]
+
+		=sci-libs/hipBLASLt-6.1*
+		amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
+		amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
+		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
+		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
+	)
+	distributed? (
+		sci-libs/tensorpipe[cuda?]
+		dev-cpp/cpp-httplib
+	)
+	xnnpack? ( >=sci-libs/XNNPACK-2024.02.29 )
+	mkl? ( sci-libs/mkl )
+	openblas? ( sci-libs/openblas )
+"
+DEPEND="
+	${RDEPEND}
+	cuda? ( >=dev-libs/cutlass-3.4.1 )
+	onednn? ( sci-libs/ideep )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20240525
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+		dev-python/typing-extensions[${PYTHON_USEDEP}]
+	')
+"
+
+PATCHES=(
+	"${FILESDIR}"/${P}-unbundle_fmt.patch
+	"${FILESDIR}"/${P}-unbundle_kineto.patch
+	"${FILESDIR}"/${P}-fix-functorch-install.patch
+	"${FILESDIR}"/${P}-cudnn_include_fix.patch
+	"${FILESDIR}"/${P}-gentoo.patch
+	"${FILESDIR}"/${P}-cpp-httplib.patch
+	"${FILESDIR}"/${P}-glog-0.6.0.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+
+	# Unbundle fmt
+	sed -i \
+		-e 's|::fmt-header-only||' \
+		c10/CMakeLists.txt \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		|| die
+	# Drop third_party from CMake tree
+	sed -i \
+		-e '/add_subdirectory.*third_party/d' \
+		CMakeLists.txt \
+		cmake/Dependencies.cmake \
+		cmake/ProtoBuf.cmake \
+		aten/src/ATen/CMakeLists.txt \
+		|| die
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:/opt/rocm:/usr:" \
+			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+	fi
+
+	local mycmakeargs=(
+		-DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+		-DPython_EXECUTABLE="${PYTHON}"
+		-DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+		-DUSE_CCACHE=OFF
+		-DUSE_CUDA=$(usex cuda)
+		-DUSE_DISTRIBUTED=$(usex distributed)
+		-DUSE_FAKELOWP=OFF
+		-DUSE_FBGEMM=$(usex fbgemm)
+		-DUSE_FLASH_ATTENTION=$(usex flash)
+		-DUSE_GFLAGS=ON
+		-DUSE_GLOG=ON
+		-DUSE_GLOO=$(usex gloo)
+		-DUSE_ITT=OFF
+		-DUSE_KINETO=OFF # TODO
+		-DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+		-DUSE_MEM_EFF_ATTENTION=OFF
+		-DUSE_MKLDNN=$(usex onednn)
+		-DUSE_MPI=$(usex mpi)
+		-DUSE_NCCL=OFF
+		-DUSE_NNPACK=$(usex nnpack)
+		-DUSE_NUMA=OFF
+		-DUSE_NUMPY=$(usex numpy)
+		-DUSE_OPENCL=$(usex opencl)
+		-DUSE_OPENMP=$(usex openmp)
+		-DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
+		-DUSE_PYTORCH_METAL=OFF
+		-DUSE_ROCM=$(usex rocm)
+		-DUSE_SYSTEM_LIBS=ON
+		-DUSE_TENSORPIPE=$(usex distributed)
+		-DUSE_UCC=OFF
+		-DUSE_VALGRIND=OFF
+		-DUSE_XNNPACK=$(usex xnnpack)
+		-DUSE_XPU=OFF
+		-Wno-dev
+	)
+
+	if use mkl; then
+		mycmakeargs+=(-DBLAS=MKL)
+	elif use openblas; then
+		mycmakeargs+=(-DBLAS=OpenBLAS)
+	else
+		mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+	fi
+
+	if use cuda; then
+		addpredict "/dev/nvidiactl" # bug 867706
+		addpredict "/dev/char"
+		addpredict "/proc/self/task" # bug 926116
+
+		mycmakeargs+=(
+			-DUSE_CUDNN=ON
+			-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+			-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+			-DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+		)
+	elif use rocm; then
+		export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+		mycmakeargs+=(
+			-DUSE_NCCL=ON
+			-DUSE_SYSTEM_NCCL=ON
+		)
+
+		# ROCm libraries produce too much warnings
+		append-cxxflags -Wno-deprecated-declarations -Wno-unused-result
+
+		if tc-is-clang; then
+			# fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656
+			append-cxxflags -fclang-abi-compat=17
+		fi
+	fi
+
+	if use onednn; then
+		mycmakeargs+=(
+			-DMKLDNN_FOUND=ON
+			-DMKLDNN_LIBRARIES=dnnl
+			-DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+		)
+	fi
+
+	cmake_src_configure
+}
+
+src_compile() {
+	PYTORCH_BUILD_VERSION=${PV} \
+	PYTORCH_BUILD_NUMBER=0 \
+	cmake_src_compile
+}
+
+src_install() {
+	cmake_src_install
+
+	insinto "/var/lib/${PN}"
+	doins "${BUILD_DIR}"/CMakeCache.txt
+
+	rm -rf python
+	mkdir -p python/torch || die
+	cp torch/version.py python/torch/ || die
+	python_domodule python/torch
+
+	dodir $(python_get_sitedir)/torch/bin
+	dodir $(python_get_sitedir)/torch/lib
+	dodir $(python_get_sitedir)/torch/include
+
+	ln -s ../../../../../include/torch \
+		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
+
+	mv "${ED}"/usr/bin/torch_shm_manager \
+		"${ED}"/$(python_get_sitedir)/torch/bin/ || die
+
+	mv "${ED}"/usr/$(get_libdir)/libtorch_global_deps.so \
+		"${ED}"/$(python_get_sitedir)/torch/lib/ || die
+
+	mv "${ED}"/usr/lib/libc10*.so \
+		"${ED}"/usr/$(get_libdir)/ || die
+}
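The ewarn block in src_configure() asks users to set TORCH_CUDA_ARCH_LIST rather than build for the 3.5/7.0 defaults. A minimal make.conf sketch, with example values only (the correct capability for your card is listed at the URL the ebuild prints):

    # /etc/portage/make.conf -- illustrative values, not a recommendation
    TORCH_CUDA_ARCH_LIST="7.5 3.5"
    # or by architecture name:
    #TORCH_CUDA_ARCH_LIST="Maxwell"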
+ ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5" + ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell" + ewarn "" + ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus" + ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'" + fi + + local mycmakeargs=( + -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir) + -DPython_EXECUTABLE="${PYTHON}" + -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir) + -DUSE_CCACHE=OFF + -DUSE_CUDA=$(usex cuda) + -DUSE_DISTRIBUTED=$(usex distributed) + -DUSE_FAKELOWP=OFF + -DUSE_FBGEMM=$(usex fbgemm) + -DUSE_FLASH_ATTENTION=$(usex flash) + -DUSE_GFLAGS=ON + -DUSE_GLOG=ON + -DUSE_GLOO=$(usex gloo) + -DUSE_ITT=OFF + -DUSE_KINETO=OFF # TODO + -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma + -DUSE_MEM_EFF_ATTENTION=OFF + -DUSE_MKLDNN=$(usex onednn) + -DUSE_MPI=$(usex mpi) + -DUSE_NCCL=OFF + -DUSE_NNPACK=$(usex nnpack) + -DUSE_NUMA=OFF + -DUSE_NUMPY=$(usex numpy) + -DUSE_OPENCL=$(usex opencl) + -DUSE_OPENMP=$(usex openmp) + -DUSE_PYTORCH_QNNPACK=$(usex qnnpack) + -DUSE_PYTORCH_METAL=OFF + -DUSE_ROCM=$(usex rocm) + -DUSE_SYSTEM_LIBS=ON + -DUSE_TENSORPIPE=$(usex distributed) + -DUSE_UCC=OFF + -DUSE_VALGRIND=OFF + -DUSE_XNNPACK=$(usex xnnpack) + -DUSE_XPU=OFF + -Wno-dev + ) + + if use mkl; then + mycmakeargs+=(-DBLAS=MKL) + elif use openblas; then + mycmakeargs+=(-DBLAS=OpenBLAS) + else + mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=) + fi + + if use cuda; then + addpredict "/dev/nvidiactl" # bug 867706 + addpredict "/dev/char" + addpredict "/proc/self/task" # bug 926116 + + mycmakeargs+=( + -DUSE_CUDNN=ON + -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}" + -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library + -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")" + ) + elif use rocm; then + export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)" + + mycmakeargs+=( + -DUSE_NCCL=ON + -DUSE_SYSTEM_NCCL=ON + ) + + # ROCm libraries produce too much warnings + append-cxxflags -Wno-deprecated-declarations -Wno-unused-result + + if tc-is-clang; then + # fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656 + append-cxxflags -fclang-abi-compat=17 + fi + fi + + if use onednn; then + mycmakeargs+=( + -DMKLDNN_FOUND=ON + -DMKLDNN_LIBRARIES=dnnl + -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl" + ) + fi + + cmake_src_configure +} + +src_compile() { + PYTORCH_BUILD_VERSION=${PV} \ + PYTORCH_BUILD_NUMBER=0 \ + cmake_src_compile +} + +src_install() { + cmake_src_install + + insinto "/var/lib/${PN}" + doins "${BUILD_DIR}"/CMakeCache.txt + + rm -rf python + mkdir -p python/torch || die + cp torch/version.py python/torch/ || die + python_domodule python/torch + + dodir $(python_get_sitedir)/torch/bin + dodir $(python_get_sitedir)/torch/lib + dodir $(python_get_sitedir)/torch/include + + ln -s ../../../../../include/torch \ + "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269 + + + mv "${ED}"/usr/bin/torch_shm_manager \ + "${ED}"/$(python_get_sitedir)/torch/bin/ || die + + mv "${ED}"/usr/$(get_libdir)/libtorch_global_deps.so \ + "${ED}"/$(python_get_sitedir)/torch/lib/ || die + + mv "${ED}"/usr/lib/libc10*.so \ + "${ED}"/usr/$(get_libdir)/ || die +} diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-cpp-httplib.patch b/sci-libs/caffe2/files/caffe2-2.5.1-cpp-httplib.patch new file mode 100644 index 000000000000..5d684a4a4738 --- /dev/null +++ 
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.5.1-exclude-aotriton.patch
new file mode 100644
index 000000000000..340d6e45c95a
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-exclude-aotriton.patch
@@ -0,0 +1,22 @@
+Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
+Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
+--- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
++++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
+@@ -659,7 +659,7 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
+       array_of<at::ScalarType>(at::kHalf, at::kFloat, at::kBFloat16);
+   constexpr auto less_than_sm80_mem_efficient_dtypes =
+       array_of<at::ScalarType>(at::kHalf, at::kFloat);
+-#ifdef USE_ROCM
++#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
+   constexpr auto aotriton_mem_efficient_dtypes =
+       array_of<at::ScalarType>(at::kHalf, at::kFloat, at::kBFloat16);
+ #endif
+@@ -709,7 +709,7 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
+     }
+   }
+ 
+-#ifdef USE_ROCM
++#if defined(USE_ROCM) && defined(USE_MEM_EFF_ATTENTION)
+   return check_tensor_dtype(params, aotriton_mem_efficient_dtypes, debug);
+ #else
+   auto dprop = at::cuda::getCurrentDeviceProperties();
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-fix-functorch-install.patch b/sci-libs/caffe2/files/caffe2-2.5.1-fix-functorch-install.patch
new file mode 100644
index 000000000000..ffce0028a556
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-fix-functorch-install.patch
@@ -0,0 +1,8 @@
+--- a/functorch/CMakeLists.txt	2024-11-10 11:18:29.151992840 +0100
++++ b/functorch/CMakeLists.txt	2024-11-10 11:20:19.642389982 +0100
+@@ -42,4 +42,4 @@
+ if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
+   set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+-install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
++install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")
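The fix-functorch-install patch above stops the build from installing the functorch extension back into its own source tree and sends it to the libdir instead. After an install, the result can be checked with a sketch like:

    # qlist (app-portage/portage-utils) lists the files a package installed;
    # the exact functorch file name matched here is an assumption.
    qlist sci-libs/caffe2 | grep -i functorch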
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.5.1-gentoo.patch
new file mode 100644
index 000000000000..f923b6746a4b
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-gentoo.patch
@@ -0,0 +1,127 @@
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -986,12 +986,11 @@ endif()
+   # third_party/FBGEMM
+   include(cmake/public/utils.cmake)
+   if(NOT MSVC)
+-    string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++    string(APPEND CMAKE_CXX_FLAGS " -O2")
+     # Eigen fails to build with some versions, so convert this to a warning
+     # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+     string(APPEND CMAKE_CXX_FLAGS " -Wall")
+     string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-    append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+     append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+     append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+     append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -1085,7 +1084,6 @@ if(NOT MSVC)
+   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+ else()
+   # skip unwanted includes from windows.h
+   add_compile_definitions(WIN32_LEAN_AND_MEAN)
+--- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
++++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
+@@ -324,16 +324,8 @@ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/pytorch_q
+ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/qnnpack_func.h)
+ 
+ # ---[ Configure clog
+-if(NOT TARGET clog)
+-  set(CLOG_BUILD_TESTS OFF CACHE BOOL "")
+-  set(CLOG_RUNTIME_TYPE "${CPUINFO_RUNTIME_TYPE}" CACHE STRING "")
+-  add_subdirectory(
+-    "${CLOG_SOURCE_DIR}"
+-    "${CONFU_DEPENDENCIES_BINARY_DIR}/clog")
+-  # We build static version of clog but a dynamic library may indirectly depend on it
+-  set_property(TARGET clog PROPERTY POSITION_INDEPENDENT_CODE ON)
+-endif()
+-target_link_libraries(pytorch_qnnpack PUBLIC clog)
++find_library(CLOG_LIBRARY NAMES clog REQUIRED)
++target_link_libraries(pytorch_qnnpack PUBLIC ${CLOG_LIBRARY})
+ 
+ # ---[ Configure cpuinfo
+ if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -87,7 +87,7 @@ endif()
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+ 
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+   if(NOT TARGET fxdiv)
+     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1081,7 +1081,6 @@ if(USE_XPU)
+ endif()
+ 
+ if(NOT MSVC AND USE_XNNPACK)
+-  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+ 
+ # ==========================================================
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@ if(INTERN_BUILD_ATEN_OPS)
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -473,7 +473,9 @@ if(USE_PYTORCH_QNNPACK)
+       set_property(TARGET pytorch_qnnpack PROPERTY POSITION_INDEPENDENT_CODE ON)
+       set_property(TARGET cpuinfo PROPERTY POSITION_INDEPENDENT_CODE ON)
+       # QNNPACK depends on gemmlowp headers
+-      target_include_directories(pytorch_qnnpack PRIVATE "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
++      find_package(gemmlowp REQUIRED)
++      get_target_property(GEMMLOWP_INCLUDE_DIRS gemmlowp::gemmlowp INTERFACE_INCLUDE_DIRECTORIES)
++      target_include_directories(pytorch_qnnpack PRIVATE ${GEMMLOWP_INCLUDE_DIRS})
+ 
+       if(PYTORCH_QNNPACK_CUSTOM_THREADPOOL)
+         target_compile_definitions(
+@@ -710,7 +712,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
+ endif()
+ 
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   if(NOT DEFINED FBGEMM_SOURCE_DIR)
+     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -758,6 +760,7 @@ if(USE_FBGEMM)
+ endif()
+ 
+ if(USE_FBGEMM)
++  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+   caffe2_update_option(USE_FBGEMM ON)
+ else()
+   caffe2_update_option(USE_FBGEMM OFF)
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -56,7 +56,7 @@ if(ANDROID OR IOS OR ${CMAKE_SYSTEM_NAME} STREQUAL "Linux" OR ${CMAKE_SYSTEM_NAM
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+     endif()
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -422,8 +422,6 @@ function(torch_compile_options libname)
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-    $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-glog-0.6.0.patch b/sci-libs/caffe2/files/caffe2-2.5.1-glog-0.6.0.patch
new file mode 100644
index 000000000000..6c06d2cca654
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-glog-0.6.0.patch
@@ -0,0 +1,29 @@
+https://github.com/pytorch/pytorch/issues/58054
+
+--- a/c10/util/Logging.cpp
++++ b/c10/util/Logging.cpp
+@@ -192,23 +192,13 @@
+     google::GLOG_WARNING,
+     "The minimum log level that caffe2 will output.");
+ 
+-// Google glog's api does not have an external function that allows one to check
+-// if glog is initialized or not. It does have an internal function - so we are
+-// declaring it here. This is a hack but has been used by a bunch of others too
+-// (e.g. Torch).
+-namespace google {
+-namespace glog_internal_namespace_ {
+-bool IsGoogleLoggingInitialized();
+-} // namespace glog_internal_namespace_
+-} // namespace google
+-
+ namespace c10 {
+ namespace {
+ 
+ void initGoogleLogging(char const* name) {
+ #if !defined(_MSC_VER)
+   // This trick can only be used on UNIX platforms
+-  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
++  if (!::google::IsGoogleLoggingInitialized())
+ #endif
+   {
+     ::google::InitGoogleLogging(name);
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-install-dirs.patch b/sci-libs/caffe2/files/caffe2-2.5.1-install-dirs.patch
new file mode 100644
index 000000000000..e99b7e59cb5b
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-install-dirs.patch
@@ -0,0 +1,11 @@
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -162,7 +162,7 @@ if(NOT BUILD_LIBTORCHLESS)
+   # Note: for now, we will put all export path into one single Caffe2Targets group
+   # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+   # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-  install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
++  install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ endif()
+ 
+ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-unbundle_fmt.patch b/sci-libs/caffe2/files/caffe2-2.5.1-unbundle_fmt.patch
new file mode 100644
index 000000000000..2594a56b36a5
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-unbundle_fmt.patch
@@ -0,0 +1,10 @@
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1522,7 +1522,6 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+ 
+ list(APPEND Caffe2_DEPENDENCY_LIBS fmt)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
diff --git a/sci-libs/caffe2/files/caffe2-2.5.1-unbundle_kineto.patch b/sci-libs/caffe2/files/caffe2-2.5.1-unbundle_kineto.patch
new file mode 100644
index 000000000000..ebe931bc49b6
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.5.1-unbundle_kineto.patch
@@ -0,0 +1,22 @@
+--- a/torch/CMakeLists.txt	2024-11-09 20:50:21.806784527 +0100
++++ b/torch/CMakeLists.txt	2024-11-09 20:52:56.303892418 +0100
+@@ -67,7 +67,7 @@
+     ${TORCH_ROOT}/third_party/gloo
+     ${TORCH_ROOT}/third_party/onnx
+     ${TORCH_ROOT}/third_party/flatbuffers/include
+-    ${TORCH_ROOT}/third_party/kineto/libkineto/include
++    "/usr/include/kineto"
+     ${TORCH_ROOT}/third_party/cpp-httplib
+     ${TORCH_ROOT}/third_party/nlohmann/include
+ 
+--- a/caffe2/CMakeLists.txt	2024-11-09 20:58:37.698085144 +0100
++++ b/caffe2/CMakeLists.txt	2024-11-09 20:59:15.771391643 +0100
+@@ -1185,7 +1185,7 @@
+     ${TORCH_ROOT}/third_party/miniz-2.1.0)
+ 
+ target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++  "/usr/include/kineto")
+ 
+ if(USE_KINETO)
+   target_include_directories(torch_cpu PRIVATE
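With the Manifest entry, the ebuild, and the patch set above in place, a sketch of the usual end-user steps for testing the ~amd64-keyworded version (the file names under /etc/portage are illustrative, as is the USE flag selection):

    # Hypothetical user workflow: accept the testing keyword, pick USE
    # flags, then build the new version.
    echo "=sci-libs/caffe2-2.5.1 ~amd64" >> /etc/portage/package.accept_keywords/caffe2
    echo "sci-libs/caffe2 cuda fbgemm numpy openmp" >> /etc/portage/package.use/caffe2
    emerge --ask =sci-libs/caffe2-2.5.1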