Skip to content
Open

Mypy #106

Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,24 @@ name: Continuous Integration
on: [push]

jobs:
mypy:
name: mypy check
runs-on: ubuntu-18.04

steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: '3.7'
- name: Install mypy
run: |
cat scripts/requirements_dev.txt | grep "mypy" | xargs pip install
- name: Check
run: |
scripts/mypy.sh --version
scripts/mypy.sh

ubuntu-unit-test:

runs-on: ubuntu-18.04
Expand Down
12 changes: 6 additions & 6 deletions bluefog/common/topology_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,11 +116,11 @@ def ExponentialGraph(size: int, base: int = 2) -> nx.DiGraph:
x.append(1.0)
else:
x.append(0.0)
x = np.array(x)
x /= x.sum()
y = np.array(x)
y /= y.sum()
topo = np.empty((size, size))
for i in range(size):
topo[i] = np.roll(x, i)
topo[i] = np.roll(y, i)
G = nx.from_numpy_array(topo, create_using=nx.DiGraph)
return G

Expand Down Expand Up @@ -148,11 +148,11 @@ def SymmetricExponentialGraph(size: int, base: int = 4) -> nx.DiGraph:
x.append(1.0)
else:
x.append(0.0)
x = np.array(x)
x /= x.sum()
y = np.array(x)
y /= y.sum()
topo = np.empty((size, size))
for i in range(size):
topo[i] = np.roll(x, i)
topo[i] = np.roll(y, i)
G = nx.from_numpy_array(topo, create_using=nx.DiGraph)
return G

Expand Down
2 changes: 1 addition & 1 deletion bluefog/common/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
EXTENSIONS = ['tensorflow', 'torch']

def is_running_from_ipython():
    """Return True when the code is executing inside an IPython session.

    Returns:
        bool: True if an active IPython shell is detected, False otherwise.
              If IPython is not installed at all, the answer is False rather
              than letting the ImportError escape to the caller.
    """
    try:
        from IPython import get_ipython  # type: ignore
    except ImportError:
        # No IPython available -> definitely not running under IPython.
        return False
    # get_ipython() returns None when imported outside an IPython shell.
    return get_ipython() is not None

def get_ext_suffix():
Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/env_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def is_open_mpi_installed():

def is_ipyparallel_installed():
    """Report whether the ``ipyparallel`` package is importable.

    Returns:
        bool: True if ``import ipyparallel`` succeeds, False otherwise.
    """
    try:
        import ipyparallel  # type: ignore # pylint: disable=unused-import
    except ImportError:
        # Package is absent from the environment.
        return False
    return True
Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/horovod_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
import os
import sys

import six
import six # type: ignore
from bluefog.run.horovodrun.common.util import codec, safe_shell_exec, timeout, secret
from bluefog.run.horovodrun.driver import driver_service
from bluefog.run.horovodrun.task import task_service
Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/horovodrun/common/util/codec.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
# ==============================================================================

import base64
import cloudpickle
import cloudpickle # type: ignore


def loads_base64(encoded):
Expand Down
6 changes: 3 additions & 3 deletions bluefog/run/horovodrun/common/util/network.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,10 @@
import socket
import struct
import threading
import cloudpickle
import psutil
import cloudpickle # type: ignore
import psutil # type: ignore

from six.moves import queue, socketserver
from six.moves import queue, socketserver # type: ignore

from bluefog.run.horovodrun.common.util import secret

Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/horovodrun/common/util/safe_shell_exec.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
# ==============================================================================

import os
import psutil
import psutil # type: ignore
import signal
import subprocess
import sys
Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/horovodrun/util/cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
import errno
import os
import threading
import cloudpickle
import cloudpickle # type: ignore


class Cache(object):
Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/horovodrun/util/network.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import socket
import psutil
import psutil # type: ignore

from bluefog.run.horovodrun.util import threads

Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/horovodrun/util/threads.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

import threading

from six.moves import queue
from six.moves import queue # type: ignore


def execute_function_multithreaded(fn,
Expand Down
6 changes: 2 additions & 4 deletions bluefog/run/interactive_run.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
import time
from typing import Dict, List

import ipyparallel as ipp
import ipyparallel as ipp # type: ignore
import bluefog
from bluefog.run import env_util, network_util, horovod_driver

Expand Down Expand Up @@ -318,7 +318,7 @@ def multiple_machines_launch(args, env: Dict[str, str],
common_intfs = horovod_driver.driver_fn(all_host_names, local_host_names,
args.ssh_port, args.verbose)
else:
common_intfs = [args.nic]
common_intfs = set(args.nic)

tcp_intf_arg = '-mca btl_tcp_if_include {common_intfs}'.format(
common_intfs=','.join(common_intfs)) if common_intfs else ''
Expand Down Expand Up @@ -384,8 +384,6 @@ def multiple_machines_launch(args, env: Dict[str, str],
ib_arg=ib_arg,
nccl_socket_intf_arg=nccl_socket_intf_arg,
extra_flags=extra_flags,
env=' '.join('-x %s' % key for key in env.keys()
if env_util.is_exportable(key)),
command=ipengine_command)
)
p_engine = subprocess.Popen(mpi_ipengine_command, shell=True, env=env)
Expand Down
2 changes: 1 addition & 1 deletion bluefog/run/network_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
import re
import socket

import psutil
import psutil # type: ignore
from bluefog.run.horovodrun.common.util import safe_shell_exec

# Number of retries for sshing into the hosts
Expand Down
6 changes: 0 additions & 6 deletions bluefog/run/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,8 @@

import argparse
import os
import re
import shlex
import socket
import subprocess
import sys
import traceback

import psutil
import bluefog
from bluefog.run import env_util, network_util, horovod_driver

Expand Down
2 changes: 1 addition & 1 deletion bluefog/tensorflow/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

import collections
import os
import tensorflow as tf
import tensorflow as tf # type: ignore

from bluefog.common.util import check_extension
check_extension('bluefog.tensorflow', __file__, 'mpi_lib')
Expand Down
8 changes: 4 additions & 4 deletions bluefog/tensorflow/mpi_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,10 @@
from __future__ import print_function

import re
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
import tensorflow as tf # type: ignore
from tensorflow.python.framework import load_library # type: ignore
from tensorflow.python.framework import ops # type: ignore
from tensorflow.python.platform import resource_loader # type: ignore

from bluefog.common.basics import BlueFogBasics
from bluefog.common.util import get_ext_suffix
Expand Down
2 changes: 1 addition & 1 deletion bluefog/tensorflow/optimizers.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
# limitations under the License.
# ==============================================================================

import tensorflow as tf
import tensorflow as tf # type: ignore

from bluefog.tensorflow.mpi_ops import allreduce, broadcast, size
from bluefog.tensorflow.util import _executing_eagerly, _cache
Expand Down
4 changes: 2 additions & 2 deletions bluefog/tensorflow/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,11 @@

from distutils.version import LooseVersion

import tensorflow
import tensorflow # type: ignore

# Eager Mode has been introduced in TF 1.7.0
if LooseVersion(tensorflow.__version__) >= LooseVersion('1.7.0'):
from tensorflow.python.eager import context
from tensorflow.python.eager import context # type: ignore
_has_eager = True
else:
_has_eager = False
Expand Down
11 changes: 6 additions & 5 deletions bluefog/torch/optimizers.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import warnings

import torch
from torch.optim import Optimizer
import bluefog.torch as bf

class CommunicationType(Enum):
Expand Down Expand Up @@ -163,7 +164,7 @@ def _timeline_forward_end_hook(module, *unused):
*pre_forward_hook_handles, *forward_end_hook_handles]


class _DistributedOptimizer(torch.optim.Optimizer):
class _DistributedOptimizer(Optimizer):
def __init__(self, params, model, backward_passes_per_step=1):
super(self.__class__, self).__init__(params)

Expand Down Expand Up @@ -294,7 +295,7 @@ def zero_grad(self):
return super(self.__class__, self).zero_grad()


class _DistributedReduceOptimizer(torch.optim.Optimizer):
class _DistributedReduceOptimizer(Optimizer):
""" A distributed optimizer wrapper over torch optimizer.

Arguments:
Expand Down Expand Up @@ -482,7 +483,7 @@ def step(self, closure=None):
return super(self.__class__, self).step(closure)


class _DistributedAdaptThenCombineOptimizer(torch.optim.Optimizer):
class _DistributedAdaptThenCombineOptimizer(Optimizer):
def __init__(self, params, model, communication_type, backward_passes_per_step=1):
super(self.__class__, self).__init__(params)

Expand Down Expand Up @@ -841,7 +842,7 @@ def zero_grad(self):
return super(self.__class__, self).zero_grad()


class _DistributedWinOptimizer(torch.optim.Optimizer):
class _DistributedWinOptimizer(Optimizer):

def __init__(self, params, model, num_steps_per_communication, window_prefix, pull_style):
super(self.__class__, self).__init__(params)
Expand Down Expand Up @@ -1023,7 +1024,7 @@ def step(self, closure=None):
return super(self.__class__, self).step(closure)


class _DistributedPushSumOptimizer(torch.optim.Optimizer):
class _DistributedPushSumOptimizer(Optimizer):

def __init__(self, params, model, num_steps_per_communication):
super(self.__class__, self).__init__(params)
Expand Down
6 changes: 3 additions & 3 deletions bluefog/torch/topology_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import bluefog.torch as bf


def _check_ranks(rank_list: List[Any], self_rank: int, size: int) -> [bool, str]:
def _check_ranks(rank_list: List[Any], self_rank: int, size: int) -> Tuple[bool, str]:
for rank in rank_list:
if not isinstance(rank, int):
return False, "contain element that is not integer."
Expand All @@ -21,7 +21,7 @@ def _check_ranks(rank_list: List[Any], self_rank: int, size: int) -> [bool, str]

def InferSourceFromDestinationRanks(
dst_ranks: List[int], construct_adjacency_matrix: bool = False,
) -> Union[List[int], Tuple[List[int], np.array]]:
) -> Union[List[int], Tuple[List[int], np.ndarray]]:
"""Infer the source ranks from destination ranks. This is collective communication call.

Args:
Expand Down Expand Up @@ -49,7 +49,7 @@ def InferSourceFromDestinationRanks(

def InferDestinationFromSourceRanks(
src_ranks: List[int], construct_adjacency_matrix: bool = False,
) -> Union[List[int], np.array]:
) -> Union[List[int], np.ndarray]:
"""Infer the destination ranks from source ranks. This is collective communication call.

Args:
Expand Down
4 changes: 2 additions & 2 deletions examples/pytorch_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import models
from torchvision import models # type: ignore
import bluefog.torch as bf
from bluefog.common import topology_util

Expand Down Expand Up @@ -66,7 +66,7 @@
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.dist_optimizer == 'horovod':
import horovod.torch as bf
import horovod.torch as bf # type: ignore

bf.init()

Expand Down
7 changes: 4 additions & 3 deletions examples/pytorch_mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from __future__ import print_function

from bluefog.common import topology_util
from typing import Tuple, List
import bluefog.torch as bf
import argparse
import os
Expand All @@ -28,7 +29,7 @@
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import datasets, transforms
from torchvision import datasets, transforms # type: ignore

sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")))
Expand Down Expand Up @@ -75,7 +76,7 @@

if args.dist_optimizer == 'horovod':
print("importing horovod")
import horovod.torch as bf
import horovod.torch as bf # type: ignore

bf.init()

Expand Down Expand Up @@ -304,7 +305,7 @@ def test(record):
)
record.append((test_loss, 100.0 * test_accuracy))

test_record = []
test_record: List[Tuple[float, float]] = []
for epoch in range(1, args.epochs + 1):
train(epoch)
test(test_record)
Expand Down
8 changes: 4 additions & 4 deletions examples/pytorch_resnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,9 @@
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import datasets, transforms, models
import tensorboardX
from tqdm import tqdm
from torchvision import datasets, transforms, models # type: ignore
import tensorboardX # type: ignore
from tqdm import tqdm # type: ignore

sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")))
Expand Down Expand Up @@ -96,7 +96,7 @@

if args.dist_optimizer == 'horovod':
print("importing horovod")
import horovod.torch as bf
import horovod.torch as bf # type: ignore

# Bluefog: initialize library.
bf.init()
Expand Down
5 changes: 5 additions & 0 deletions scripts/.mypy.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
[mypy]

[mypy-matplotlib.*,networkx.*,pytest.*,numpy.*]
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Based on my understanding, I think we can add more common libraries to .mypy.ini

[mypy-matplotlib.*,networkx.*,pytest.*,numpy.*,tensorflow.*,torch.*,psutil.*]
follow_imports = silent
ignore_missing_imports = true

follow_imports = silent
ignore_missing_imports = true
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: new line at end of file

Loading