diff --git a/README.md b/README.md
index 01e18dcbe..70d902ac3 100644
--- a/README.md
+++ b/README.md
@@ -10,3 +10,7 @@ Folders:
 - **windows** : scripts to build Windows wheels
 - **cron** : scripts to drive all of the above scripts across multiple configurations together
 - **analytics** : scripts to pull wheel download count from our AWS s3 logs
+
+## Testing
+
+In order to test builds triggered by PyTorch repo's GitHub actions see [these instructions](https://github.com/pytorch/pytorch/blob/master/.github/scripts/README.md#testing-pytorchbuilder-changes)
diff --git a/conda/build_pytorch.sh b/conda/build_pytorch.sh
index 33e9278b0..4438059a1 100755
--- a/conda/build_pytorch.sh
+++ b/conda/build_pytorch.sh
@@ -388,7 +388,18 @@ for py_ver in "${DESIRED_PYTHON[@]}"; do
 
     # Install the built package and run tests, unless it's for mac cross compiled arm64
     if [[ -z "$CROSS_COMPILE_ARM64" ]]; then
-        conda install -y "$built_package"
+        # Install the package as if from local repo instead of tar.bz2 directly in order
+        # to trigger runtime dependency installation. See https://github.com/conda/conda/issues/1884
+        # Notes:
+        #  - pytorch-nightly is included to install torchtriton
+        #  - nvidia is included for cuda builds, there's no harm in listing the channel for cpu builds
+        if [[ "$OSTYPE" == "msys" ]]; then
+            # note the extra slash: `pwd -W` returns `c:/path/to/dir`, we need to add an extra slash for the URI
+            local_channel="/$(pwd -W)/$output_folder"
+        else
+            local_channel="$(pwd)/$output_folder"
+        fi
+        conda install -y -c "file://$local_channel" pytorch==$PYTORCH_BUILD_VERSION -c pytorch -c numba/label/dev -c pytorch-nightly -c nvidia
 
         echo "$(date) :: Running tests"
         pushd "$pytorch_rootdir"
diff --git a/run_tests.sh b/run_tests.sh
index 18b00f00b..fd66835e2 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -72,21 +72,6 @@ fi
 
 # Environment initialization
 if [[ "$package_type" == conda || "$(uname)" == Darwin ]]; then
-    # Why are there two different ways to install dependencies after installing an offline package?
-    # The "cpu" conda package for pytorch doesn't actually depend on "cpuonly" which means that
-    # when we attempt to update dependencies using "conda update --all" it will attempt to install
-    # whatever "cudatoolkit" your current computer relies on (which is sometimes none). When conda
-    # tries to install this cudatoolkit that correlates with your current hardware it will also
-    # overwrite the currently installed "local" pytorch package meaning you aren't actually testing
-    # the right package.
-    # TODO (maybe): Make the "cpu" package of pytorch depend on "cpuonly"
-    if [[ "$cuda_ver" = 'cpu' ]]; then
-        # Installing cpuonly will also install dependencies as well
-        retry conda install -y -c pytorch cpuonly
-    else
-        # Install dependencies from installing the pytorch conda package offline
-        retry conda update -yq --all -c defaults -c pytorch -c numba/label/dev
-    fi
     # Install the testing dependencies
     retry conda install -yq future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest setuptools six typing_extensions pyyaml
 else
@@ -140,15 +125,21 @@ python -c "import torch; exit(0 if torch.__version__ == '$expected_version' else 1)"
 
 # Test that CUDA builds are setup correctly
 if [[ "$cuda_ver" != 'cpu' ]]; then
-    # Test CUDA archs
-    echo "Checking that CUDA archs are setup correctly"
-    timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
-
-    # These have to run after CUDA is initialized
-    echo "Checking that magma is available"
-    python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
-    echo "Checking that CuDNN is available"
-    python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
+    cuda_installed=1
+    nvidia-smi || cuda_installed=0
+    if [[ "$cuda_installed" == 0 ]]; then
+        echo "Skip CUDA tests for machines without a Nvidia GPU card"
+    else
+        # Test CUDA archs
+        echo "Checking that CUDA archs are setup correctly"
+        timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
+
+        # These have to run after CUDA is initialized
+        echo "Checking that magma is available"
+        python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
+        echo "Checking that CuDNN is available"
+        python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
+    fi
 fi
 
 # Check that OpenBlas is not linked to on Macs