
Commit 76fdeed

Karthik Prasad authored and facebook-github-bot committed
Move CI to Github Actions (pytorch#620)
Summary: Moves the continuous integration (CI) process from CircleCI to GitHub Actions. The changes include creating new YAML files that define CI workflows largely mimicking the existing CircleCI config. There are two new workflows: one for CPU jobs and one for GPU jobs. NOTE: Currently, the CPU jobs are running fine, but NOT the GPU jobs. Differential Revision: D53107145
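For orientation, here is a minimal sketch of the skeleton both new workflow files follow (push and pull_request triggers on main, plus a nightly cron in the CPU workflow only), condensed from the diff below; the job and steps shown are placeholders, not the actual job definitions:

name: CI_CPU

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  schedule:
    - cron: '4 4 * * *'  # nightly trigger (CPU workflow only)

jobs:
  example_job:  # placeholder; the real workflows define lint, unit test, integration test, and coverage jobs
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install and test
        run: |
          ./scripts/install_via_pip.sh
          python -m pytest opacus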
1 parent caf433d commit 76fdeed

File tree

3 files changed: +462, -0 lines changed


.github/workflows/ci_cpu.yml

+170
@@ -0,0 +1,170 @@
name: CI_CPU

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  schedule:
    - cron: '4 4 * * *' # This schedule runs the nightly job every night at 4:04AM


jobs:
  ########### LINT ##############
  lint_py39_torch_release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install flake8 black isort
          ./scripts/install_via_pip.sh
      - name: Lint with flake8
        run: flake8 --config ./.github/workflows/flake8_config.ini
      - name: Lint with black
        run: black --check --diff --color .
      - name: Check import order with isort
        run: isort -v -l 88 -o opacus --lines-after-imports 2 -m 3 --trailing-comma --check-only .

  ########### UNIT TESTS ##############
  unittest_py38_torch_release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest coverage coveralls
          ./scripts/install_via_pip.sh
      - name: Run unit tests
        run: |
          mkdir unittest-py38-release-reports
          coverage run -m pytest --doctest-modules -p conftest --junitxml=unittest-py38-release-reports/junit.xml opacus
          coverage report -i -m
      - name: Store test results
        uses: actions/upload-artifact@v2
        with:
          name: unittest-py38-release-reports
          path: unittest-py38-release-reports

  unittest_py39_torch_release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest coverage coveralls
          ./scripts/install_via_pip.sh
      - name: Run unit tests
        run: |
          mkdir unittest-py39-release-reports
          coverage run -m pytest --doctest-modules -p conftest --junitxml=unittest-py39-release-reports/junit.xml opacus
          coverage report -i -m
      - name: Store test results
        uses: actions/upload-artifact@v2
        with:
          name: unittest-py39-release-reports
          path: unittest-py39-release-reports

  prv_accountant_values:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          ./scripts/install_via_pip.sh
      - name: Run prv accountant unit tests
        run: |
          python -m unittest opacus.tests.prv_accountant

  ########### NIGHTLY TEST ##############
  unittest_py39_torch_nightly:
    runs-on: ubuntu-latest
    if: ${{ github.event_name == 'schedule' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest coverage coveralls
          ./scripts/install_via_pip.sh -n
      - name: Run unit tests
        run: |
          mkdir unittest-py39-nightly-reports
          python -m pytest --doctest-modules -p conftest --junitxml=unittest-py39-nightly-reports/junit.xml opacus
      - name: Store test results
        uses: actions/upload-artifact@v2
        with:
          name: unittest-py39-nightly-reports
          path: unittest-py39-nightly-reports

  ########### INTEGRATION TEST ##############
  integrationtest_py39_torch_release_cpu:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest coverage coveralls
          ./scripts/install_via_pip.sh
      - name: Run MNIST integration test (CPU)
        run: |
          mkdir -p runs/mnist/data
          mkdir -p runs/mnist/test-reports
          coverage run examples/mnist.py --lr 0.25 --sigma 0.7 -c 1.5 --batch-size 64 --epochs 1 --data-root runs/mnist/data --n-runs 1 --device cpu
          python -c "import torch; accuracy = torch.load('run_results_mnist_0.25_0.7_1.5_64_1.pt'); exit(0) if (accuracy[0]>0.78 and accuracy[0]<0.95) else exit(1)"
          coverage report -i -m
      - name: Store test results
        uses: actions/upload-artifact@v2
        with:
          name: mnist-cpu-reports
          path: runs/mnist/test-reports

  ######## FINISH COVERALLS ##########
  finish_coveralls_parallel:
    needs: [unittest_py38_torch_release, unittest_py39_torch_release, integrationtest_py39_torch_release_cpu]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Finish Coveralls Parallel
        uses: coverallsapp/github-action@v2
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          parallel-finished: true
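Note that the unit-test and integration jobs above install coveralls but, in this diff, only produce a local coverage report, while the final finish_coveralls_parallel job closes out a parallel Coveralls build. Below is a hedged sketch of the per-job upload step such a parallel setup would typically include; it is not part of this commit, the flag name is illustrative, and the coveralls --service=github call plus the COVERALLS_PARALLEL and COVERALLS_FLAG_NAME environment variables come from the coveralls-python CLI that these jobs already install:

      - name: Upload coverage to Coveralls (parallel)
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_PARALLEL: 'true'
          COVERALLS_FLAG_NAME: unittest-py39-release  # illustrative flag name
        run: coveralls --service=github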

.github/workflows/ci_gpu.yml

+173
@@ -0,0 +1,173 @@
name: CI_GPU

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  unittest_multi_gpu:
    runs-on: linux.4xlarge.nvidia.gpu
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9

      - name: Install dependencies
        run: |
          ./scripts/install_via_pip.sh -c

      - name: Run multi-GPU unit tests
        run: |
          nvidia-smi
          nvcc --version
          python -m unittest opacus.tests.multigpu_gradcheck.GradientComputationTest.test_gradient_correct


  integrationtest_py39_torch_release_cuda:
    runs-on: ubuntu-latest
    container:
      # https://hub.docker.com/r/nvidia/cuda
      image: nvidia/cuda:12.3.1-base-ubuntu22.04
      options: --gpus all
      env:
        TZ: 'UTC'
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest coverage coveralls
          ./scripts/install_via_pip.sh -c

      - name: Install CUDA toolkit and cuDNN
        run: |
          apt-get update
          apt-get install -y --no-install-recommends \
            cuda-toolkit-11-1 \
            libcudnn8=8.1.1.33-1+cuda11.1 \
            libcudnn8-dev=8.1.1.33-1+cuda11.1

      - name: Run MNIST integration test (CUDA)
        run: |
          mkdir -p runs/mnist/data
          mkdir -p runs/mnist/test-reports
          python examples/mnist.py --lr 0.25 --sigma 0.7 -c 1.5 --batch-size 64 --epochs 1 --data-root runs/mnist/data --n-runs 1 --device cuda
          python -c "import torch; accuracy = torch.load('run_results_mnist_0.25_0.7_1.5_64_1.pt'); exit(0) if (accuracy[0]>0.78 and accuracy[0]<0.95) else exit(1)"

      - name: Store MNIST test results
        uses: actions/upload-artifact@v2
        with:
          name: mnist-gpu-reports
          path: runs/mnist/test-reports

      - name: Run CIFAR10 integration test (CUDA)
        run: |
          mkdir -p runs/cifar10/data
          mkdir -p runs/cifar10/logs
          mkdir -p runs/cifar10/test-reports
          pip install tensorboard
          python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda
          python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
          python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda --grad_sample_mode no_op
          python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"

      - name: Store CIFAR10 test results
        uses: actions/upload-artifact@v2
        with:
          name: cifar10-gpu-reports
          path: runs/cifar10/test-reports

      - name: Run IMDb integration test (CUDA)
        run: |
          mkdir -p runs/imdb/data
          mkdir -p runs/imdb/test-reports
          pip install --user datasets transformers
          python examples/imdb.py --lr 0.02 --sigma 1.0 -c 1.0 --batch-size 64 --max-sequence-length 256 --epochs 2 --data-root runs/imdb/data --device cuda
          python -c "import torch; accuracy = torch.load('run_results_imdb_classification.pt'); exit(0) if (accuracy>0.54 and accuracy<0.66) else exit(1)"

      - name: Store IMDb test results
        uses: actions/upload-artifact@v2
        with:
          name: imdb-gpu-reports
          path: runs/imdb/test-reports

      - name: Run charlstm integration test (CUDA)
        run: |
          mkdir -p runs/charlstm/data
          wget https://download.pytorch.org/tutorial/data.zip -O runs/charlstm/data/data.zip
          unzip runs/charlstm/data/data.zip -d runs/charlstm/data
          rm runs/charlstm/data/data.zip
          mkdir -p runs/charlstm/test-reports
          pip install scikit-learn
          python examples/char-lstm-classification.py --epochs=20 --learning-rate=2.0 --hidden-size=128 --delta=8e-5 --batch-size 400 --n-layers=1 --sigma=1.0 --max-per-sample-grad-norm=1.5 --data-root="runs/charlstm/data/data/names/" --device cuda --test-every 5
          python -c "import torch; accuracy = torch.load('run_results_chr_lstm_classification.pt'); exit(0) if (accuracy>0.60 and accuracy<0.80) else exit(1)"

      - name: Store test results
        uses: actions/upload-artifact@v2
        with:
          name: charlstm-gpu-reports
          path: runs/charlstm/test-reports

  micro_benchmarks_py39_torch_release_cuda:
    runs-on: ubuntu-latest
    needs: [integrationtest_py39_torch_release_cuda]
    container:
      # https://hub.docker.com/r/nvidia/cuda
      image: nvidia/cuda:12.3.1-base-ubuntu22.04
      options: --gpus all
      env:
        TZ: 'UTC'
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest coverage coveralls
          ./scripts/install_via_pip.sh

      - name: Install CUDA toolkit and cuDNN
        run: |
          apt-get update
          apt-get install -y --no-install-recommends \
            cuda-toolkit-11-1 \
            libcudnn8=8.1.1.33-1+cuda11.1 \
            libcudnn8-dev=8.1.1.33-1+cuda11.1

      - name: Run benchmark integration tests (CUDA)
        run: |
          mkdir -p benchmarks/results/raw
          python benchmarks/run_benchmarks.py --batch_size 16 --layers "groupnorm instancenorm layernorm" --config_file ./benchmarks/config.json --root ./benchmarks/results/raw/ --cont
          IFS=$' ';layers=("groupnorm" "instancenorm" "layernorm"); rm -rf /tmp/report_layers; mkdir -p /tmp/report_layers; IFS=$'\n'; files=`( echo "${layers[*]}" ) | sed 's/.*/.\/benchmarks\/results\/raw\/&*/'`
          cp -v ${files[@]} /tmp/report_layers
          report_id=`IFS=$'-'; echo "${layers[*]}"`
          python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.csv --format csv
          python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.pkl --format pkl
          python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric runtime --threshold 3.0 --column "hooks/baseline"
          python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric memory --threshold 1.6 --column "hooks/baseline"

      - name: Store artifacts
        uses: actions/upload-artifact@v2
        with:
          name: benchmarks-reports
          path: benchmarks/results/
