diff --git a/.circleci/config.yml b/.circleci/config.yml index c1d872ad41..b7269b66ee 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,7 +4,7 @@ orbs: python: circleci/python@3.0.0 jobs: - manylinux2014-aarch64: + manylinux_2_28-aarch64: parameters: NRN_PYTHON_VERSION_MINOR: @@ -31,7 +31,7 @@ jobs: -e NRN_RELEASE_UPLOAD \ -e SETUPTOOLS_SCM_PRETEND_VERSION \ -e NRN_BUILD_FOR_UPLOAD=1 \ - 'neuronsimulator/neuron_wheel:latest-aarch64' \ + 'docker.io/neuronsimulator/neuron_wheel:manylinux_2_28_aarch64' \ packaging/python/build_wheels.bash linux 3<< parameters.NRN_PYTHON_VERSION_MINOR >> coreneuron - store_artifacts: @@ -71,7 +71,7 @@ workflows: build-workflow: jobs: - - manylinux2014-aarch64: + - manylinux_2_28-aarch64: filters: branches: only: @@ -91,7 +91,7 @@ workflows: only: - master jobs: - - manylinux2014-aarch64: + - manylinux_2_28-aarch64: matrix: parameters: NRN_PYTHON_VERSION_MINOR: ["9", "10", "11", "12", "13"] diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5970a4243f..a081844ac0 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -70,7 +70,7 @@ stages: -e NRN_RELEASE_UPLOAD \ -e SETUPTOOLS_SCM_PRETEND_VERSION \ -e NRN_BUILD_FOR_UPLOAD=1 \ - 'neuronsimulator/neuron_wheel:latest-x86_64' \ + 'docker.io/neuronsimulator/neuron_wheel:manylinux_2_28_x86_64' \ packaging/python/build_wheels.bash linux $(python.version) coreneuron displayName: 'Building ManyLinux Wheel' diff --git a/docs/install/install_instructions.md b/docs/install/install_instructions.md index a5024dd9df..3a40359ba9 100644 --- a/docs/install/install_instructions.md +++ b/docs/install/install_instructions.md @@ -107,7 +107,7 @@ architecture. 
#### Linux -Like Mac OS, since 7.8.1 release python wheels are provided and you can use `pip` to install NEURON by opening a terminal and typing: +Like Mac OS, since 7.8.1 release Python wheels are provided and you can use `pip` to install NEURON by opening a terminal and typing: ``` pip3 install neuron @@ -116,6 +116,15 @@ pip3 install neuron Note that Python2 wheels are provided for the 8.0.x release series exclusively. Also, we are not providing .rpm or .deb installers for recent releases. +**Note**: as of NEURON major version 9, the minimum system requirements for using NEURON Python wheels on Linux are: + +* Debian 10 or higher +* Ubuntu 18.10 or higher +* Fedora 29 or higher +* CentOS/RHEL 8 or higher + +Furthermore, GCC >= 10 is required (older versions of GCC may work, but are not recommended). + #### Windows On Windows, the only recommended way to install NEURON is using the binary installer. You can download alpha diff --git a/docs/install/python_wheels.md b/docs/install/python_wheels.md index 016134d3ce..8456174bcd 100644 --- a/docs/install/python_wheels.md +++ b/docs/install/python_wheels.md @@ -4,7 +4,7 @@ ## Linux wheels In order to have NEURON binaries run on most Linux distros, we rely on the [manylinux project](https://github.com/pypa/manylinux). -Current NEURON Linux image is based on `manylinux2014`. +Current NEURON Linux image is based on `manylinux_2_28`. ### Setting up Docker @@ -35,23 +35,8 @@ Refer to the following image for the NEURON Docker Image workflow: ![](images/docker-workflow.png) -### Building the docker images automatically -If you run the workflow manually on Gitlab (with the "Run pipeline" button), it will now have the `mac_m1_container_build` and `x86_64_container_build` jobs added to it. These jobs need to be started manually and will not affect the overal workflow status. They don't need to be run every time, just when a refresh of the container images is necessary. 
-They will build the container images and push to docker hub. If you want to, you can still build manually (see next section), but there shouldn't be a requirement to do so any more. - -A word of warning: podman on OSX uses a virtual machine. The job can take care of starting it, but we generally try to have it running to avoid jobs cleaning up after themselves and killing the machine for other jobs. When starting the machine, set the variables that need to be set during the container build, ie. proxy and `BUILDAH_FORMAT`. - -`BUILDAH_FORMAT` ensures that `ONBUILD` instructions are enabled. - -``` -export http_proxy=http://bbpproxy.epfl.ch:80 -export https_proxy=http://bbpproxy.epfl.ch:80 -export HTTP_PROXY=http://bbpproxy.epfl.ch:80 -export HTTPS_PROXY=http://bbpproxy.epfl.ch:80 -export BUILDAH_FORMAT=docker -``` - ### Building the docker image manually + After making updates to any of the docker files, you can build the image with: ``` cd nrn/packaging/python @@ -108,11 +93,6 @@ For `HPE-MPT MPI`, since it's not open source, you need to acquire the headers a docker run -v $PWD/nrn:/root/nrn -w /root/nrn -v $PWD/mpt-headers/2.21/include:/nrnwheel/mpt/include -it neuronsimulator/neuron_wheel:latest-x86_64 bash ``` where `$PWD/mpt-headers` is the path to the HPE-MPT MPI headers on the host machine that end up mounted at `/nrnwheel/mpt/include`. -You can download the headers with: - -``` -git clone ssh://bbpcode.epfl.ch/user/kumbhar/mpt-headers -``` ## macOS wheels @@ -206,15 +186,6 @@ bash packaging/python/test_wheels.sh python3.9 "-i https://test.pypi.org/simple/ On MacOS, launching `nrniv -python` or `special -python` can fail to load `neuron` module due to security restrictions. For this specific purpose, please `export SKIP_EMBEDED_PYTHON_TEST=true` before launching the tests. 
-### Testing on BB5 -On BB5, we can test CPU wheels with: - -``` -salloc -A proj16 -N 1 --ntasks-per-node=4 -C "cpu" --time=1:00:00 -p interactive -module load unstable python -bash packaging/python/test_wheels.sh python3.9 wheelhouse/NEURON-7.8.0.236-cp39-cp39-manylinux1_x86_64.whl -``` - ## Publishing the wheels on Pypi via Azure ### Variables that drive PyPI upload diff --git a/packaging/python/Dockerfile b/packaging/python/Dockerfile index 95931335cc..d50bb54f6c 100644 --- a/packaging/python/Dockerfile +++ b/packaging/python/Dockerfile @@ -1,9 +1,23 @@ -ARG MANYLINUX_IMAGE=manylinux2014_x86_64 +ARG MANYLINUX_IMAGE=manylinux_2_28_x86_64 FROM quay.io/pypa/$MANYLINUX_IMAGE -LABEL authors="Pramod Kumbhar, Fernando Pereira, Alexandru Savulescu" +LABEL authors="Pramod Kumbhar, Fernando Pereira, Alexandru Savulescu, Goran Jelic-Cizmek" + +# problem: libstdc++ is _not_ forwards compatible, so if we try to compile mod +# files on a system that ships a version of it older than the one used for +# building the wheel itself, we'll get linker errors. 
+# solution: use a well-defined oldest-supported version of GCC +# we need to do this _before_ building any libraries from source +ARG OLDEST_SUPPORTED_GCC_VERSION=10 +RUN yum -y install \ + gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}-gcc \ + gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}-gcc-c++ \ + && yum -y clean all && rm -rf /var/cache +ENV PATH /opt/rh/gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}/root/usr/bin:$PATH +ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}/root/usr/lib:/opt/rh/gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}/root/usr/lib64/dyninst:/opt/rh/gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}/root/usr/lib/dyninst +ENV DEVTOOLSET_ROOTPATH=/opt/rh/gcc-toolset-${OLDEST_SUPPORTED_GCC_VERSION}/root -RUN gcc --version && python --version +RUN gcc --version && python3 --version # install basic packages RUN yum -y install \ @@ -13,6 +27,9 @@ RUN yum -y install \ vim \ curl \ unzip \ + flex \ + mpich-devel \ + openmpi-devel \ bison \ autoconf \ automake \ @@ -28,11 +45,6 @@ RUN yum -y install \ WORKDIR /root -# newer flex with rpmbuild (manylinux2014 based on Centos7 currently has flex < 2.6) -RUN rpmbuild --rebuild https://vault.centos.org/8-stream/AppStream/Source/SPackages/flex-2.6.1-9.el8.src.rpm \ - && yum -y install rpmbuild/RPMS/*/flex-2.6.1-9.el7.*.rpm \ - && rm -rf rpmbuild - RUN wget http://ftpmirror.gnu.org/ncurses/ncurses-6.4.tar.gz \ && tar -xvzf ncurses-6.4.tar.gz \ && cd ncurses-6.4 \ @@ -40,21 +52,6 @@ RUN wget http://ftpmirror.gnu.org/ncurses/ncurses-6.4.tar.gz \ && make -j install \ && cd .. && rm -rf ncurses-6.4 ncurses-6.4.tar.gz -RUN curl -L -o mpich-3.3.2.tar.gz http://www.mpich.org/static/downloads/3.3.2/mpich-3.3.2.tar.gz \ - && tar -xvzf mpich-3.3.2.tar.gz \ - && cd mpich-3.3.2 \ - && ./configure --disable-fortran --prefix=/nrnwheel/mpich \ - && make -j install \ - && cd .. 
&& rm -rf mpich-3.3.2 mpich-3.3.2.tar.gz \ - && rm -rf /nrnwheel/mpich/share/doc /nrnwheel/mpich/share/man - -RUN curl -L -o openmpi-4.0.3.tar.gz https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.3.tar.gz \ - && tar -xvzf openmpi-4.0.3.tar.gz \ - && cd openmpi-4.0.3 \ - && ./configure --prefix=/nrnwheel/openmpi \ - && make -j install \ - && cd .. && rm -rf openmpi-4.0.3 openmpi-4.0.3.tar.gz - RUN curl -L -o readline-7.0.tar.gz https://ftp.gnu.org/gnu/readline/readline-7.0.tar.gz \ && tar -xvzf readline-7.0.tar.gz \ && cd readline-7.0 \ @@ -83,13 +80,9 @@ RUN curl -L -o Python-3.10.0.tar.gz https://www.python.org/ftp/python/3.10.0/Pyt && make -j altinstall \ && cd .. && rm -rf Python-3.10.0 Python-3.10.0.tar.gz -ENV PATH /nrnwheel/openmpi/bin:$PATH RUN yum -y install epel-release libX11-devel libXcomposite-devel vim-enhanced && yum -y clean all && rm -rf /var/cache RUN yum -y remove ncurses-devel -# Copy Dockerfile for reference -COPY Dockerfile . - # build wheels from there WORKDIR /root @@ -97,3 +90,8 @@ WORKDIR /root RUN rm -fr /opt/python/cp313-cp313t ENV NMODL_PYLIB=/nrnwheel/python/lib/libpython3.10.so.1.0 + +ENV PATH /usr/lib64/openmpi/bin:$PATH + +# Copy Dockerfile for reference +COPY Dockerfile . diff --git a/packaging/python/README.md b/packaging/python/README.md deleted file mode 120000 index 2529b5ed57..0000000000 --- a/packaging/python/README.md +++ /dev/null @@ -1 +0,0 @@ -../../docs/install/python_wheels.md \ No newline at end of file diff --git a/packaging/python/README.md b/packaging/python/README.md new file mode 100644 index 0000000000..7b7e1fc82b --- /dev/null +++ b/packaging/python/README.md @@ -0,0 +1 @@ +Refer to [this document](../../docs/install/python_wheels.md) to see how to build Python wheels for NEURON. 
diff --git a/packaging/python/build_wheels.bash b/packaging/python/build_wheels.bash index abea2dcb5b..c022bdec7f 100755 --- a/packaging/python/build_wheels.bash +++ b/packaging/python/build_wheels.bash @@ -193,7 +193,15 @@ coreneuron=$3 case "$1" in linux) - MPI_INCLUDE_HEADERS="/nrnwheel/openmpi/include;/nrnwheel/mpich/include" + MPI_POSSIBLE_INCLUDE_HEADERS="/usr/include/openmpi-$(uname -m) /usr/include/mpich-$(uname -m) /usr/lib/$(uname -m)-linux-gnu/openmpi/include /usr/include/$(uname -m)-linux-gnu/mpich" + MPI_INCLUDE_HEADERS="" + for dir in $MPI_POSSIBLE_INCLUDE_HEADERS + do + if [ -d "${dir}" ]; then + MPI_INCLUDE_HEADERS="${MPI_INCLUDE_HEADERS};${dir}" + fi + done + # Check for MPT headers. On Azure, we extract them from a secure file and mount them in the docker image in: MPT_INCLUDE_PATH="/nrnwheel/mpt/include" if [ -d "$MPT_INCLUDE_PATH" ]; then @@ -221,7 +229,16 @@ case "$1" in MPI_INCLUDE_HEADERS="${BREW_PREFIX}/opt/openmpi/include;${BREW_PREFIX}/opt/mpich/include" build_wheel_osx $(which python3) "$coreneuron" "$MPI_INCLUDE_HEADERS" else - MPI_INCLUDE_HEADERS="/usr/lib/x86_64-linux-gnu/openmpi/include;/usr/include/x86_64-linux-gnu/mpich" + # first two are for AlmaLinux 8 (default for manylinux_2_28); + # second two are for Debian/Ubuntu derivatives + MPI_POSSIBLE_INCLUDE_HEADERS="/usr/include/openmpi-$(uname -m) /usr/include/mpich-$(uname -m) /usr/lib/$(uname -m)-linux-gnu/openmpi/include /usr/include/$(uname -m)-linux-gnu/mpich" + MPI_INCLUDE_HEADERS="" + for dir in $MPI_POSSIBLE_INCLUDE_HEADERS + do + if [ -d "${dir}" ]; then + MPI_INCLUDE_HEADERS="${MPI_INCLUDE_HEADERS};${dir}" + fi + done build_wheel_linux $(which python3) "$coreneuron" "$MPI_INCLUDE_HEADERS" fi ls wheelhouse/ diff --git a/packaging/python/test_wheels.sh b/packaging/python/test_wheels.sh index 853d7cbd7e..a53dfce3f2 100755 --- a/packaging/python/test_wheels.sh +++ b/packaging/python/test_wheels.sh @@ -5,7 +5,7 @@ set -xe # See CMake's CMAKE_HOST_SYSTEM_PROCESSOR documentation # 
On the systems where we are building wheel we can rely # on uname -m. Note that this is just wheel testing script. -ARCH_DIR=`uname -m` +ARCH_DIR="$(uname -m)" if [ ! -f setup.py ]; then echo "Error: Please launch $0 from the root dir" @@ -36,10 +36,6 @@ run_mpi_test () { echo "======= Testing $mpi_name ========" if [ -n "$mpi_module" ]; then echo "Loading module $mpi_module" - if [[ $(hostname -f) = *r*bbp.epfl.ch* ]]; then - echo "\tusing unstable on BB5" - module load unstable - fi module load $mpi_module fi @@ -179,29 +175,16 @@ run_parallel_test() { export DYLD_LIBRARY_PATH=${BREW_PREFIX}/opt/open-mpi/lib:$DYLD_LIBRARY_PATH run_mpi_test "${BREW_PREFIX}/opt/open-mpi/bin/mpirun" "OpenMPI" "" - # CI Linux or Azure Linux - elif [[ "$CI_OS_NAME" == "linux" || "$AGENT_OS" == "Linux" ]]; then + # CI Linux or Azure Linux or circleCI build (all on Debian/Ubuntu) + elif [[ "$CI_OS_NAME" == "linux" || "$AGENT_OS" == "Linux" || "$CIRCLECI" == "true" ]]; then # make debugging easier sudo update-alternatives --get-selections | grep mpi - sudo update-alternatives --list mpi-x86_64-linux-gnu + sudo update-alternatives --list mpi-${ARCH_DIR}-linux-gnu # choose mpich - sudo update-alternatives --set mpi-x86_64-linux-gnu /usr/include/x86_64-linux-gnu/mpich + sudo update-alternatives --set mpi-${ARCH_DIR}-linux-gnu /usr/include/${ARCH_DIR}-linux-gnu/mpich run_mpi_test "mpirun.mpich" "MPICH" "" # choose openmpi - sudo update-alternatives --set mpi-x86_64-linux-gnu /usr/lib/x86_64-linux-gnu/openmpi/include - run_mpi_test "mpirun.openmpi" "OpenMPI" "" - - # BB5 with multiple MPI libraries - elif [[ $(hostname -f) = *r*bbp.epfl.ch* ]]; then - run_mpi_test "srun" "HPE-MPT" "hpe-mpi" - run_mpi_test "mpirun" "Intel MPI" "intel-oneapi-mpi" - run_mpi_test "srun" "MVAPICH2" "mvapich2" - - # circle-ci build - elif [[ "$CIRCLECI" == "true" ]]; then - sudo update-alternatives --set mpi-aarch64-linux-gnu /usr/include/aarch64-linux-gnu/mpich - run_mpi_test "mpirun.mpich" "MPICH" "" - sudo 
update-alternatives --set mpi-aarch64-linux-gnu /usr/lib/aarch64-linux-gnu/openmpi/include + sudo update-alternatives --set mpi-${ARCH_DIR}-linux-gnu /usr/lib/${ARCH_DIR}-linux-gnu/openmpi/include run_mpi_test "mpirun.openmpi" "OpenMPI" "" # linux desktop or docker container used for wheel diff --git a/share/lib/python/scripts/_binwrapper.py b/share/lib/python/scripts/_binwrapper.py index 1fb7e417ab..7bc439a603 100755 --- a/share/lib/python/scripts/_binwrapper.py +++ b/share/lib/python/scripts/_binwrapper.py @@ -7,6 +7,7 @@ import shutil import subprocess import sys +import warnings from importlib.metadata import metadata, PackageNotFoundError from importlib.util import find_spec from pathlib import Path @@ -44,21 +45,23 @@ def _set_default_compiler(): os.environ.setdefault("CXX", ccompiler.compiler_cxx[0]) -def _check_cpp_compiler_version(): - """Check if GCC compiler is >= 9.0 otherwise show warning""" +def _check_cpp_compiler_version(min_version: str): + """Check if GCC compiler is >= min supported one, otherwise show warning""" try: cpp_compiler = os.environ.get("CXX", "") version = subprocess.run( - [cpp_compiler, "--version"], stdout=subprocess.PIPE + [cpp_compiler, "--version"], + stdout=subprocess.PIPE, ).stdout.decode("utf-8") - if "GCC" in version: + if "gcc" in version.lower() or "gnu" in version.lower(): version = subprocess.run( - [cpp_compiler, "-dumpversion"], stdout=subprocess.PIPE + [cpp_compiler, "-dumpversion"], + stdout=subprocess.PIPE, ).stdout.decode("utf-8") - if Version(version) <= Version("9.0"): - print( - "Warning: GCC >= 9.0 is required with this version of NEURON but found", - version, + if Version(version) < Version(min_version): + warnings.warn( + f"Warning: GCC >= {min_version} is required with this version of NEURON " + f"but found version {version}", ) except: pass @@ -111,7 +114,7 @@ def _wrap_executable(output_name): if exe.endswith("nrnivmodl"): # To create a wrapper for special (so it also gets ENV vars) we intercept nrnivmodl
- _check_cpp_compiler_version() + _check_cpp_compiler_version("10.0") subprocess.check_call([exe, *sys.argv[1:]]) _wrap_executable("special") sys.exit(0)