[pre-commit.ci] pre-commit suggestions (#20532)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <[email protected]>
Co-authored-by: Jirka B <[email protected]>
3 people authored Jan 7, 2025
1 parent 76f0c54 commit 1b26ac4
Showing 289 changed files with 628 additions and 511 deletions.
2 changes: 1 addition & 1 deletion .github/CONTRIBUTING.md
@@ -182,7 +182,7 @@ We welcome any useful contribution! For your convenience here's a recommended wo
1. Use tags in PR name for the following cases:

- **\[blocked by #<number>\]** if your work is dependent on other PRs.
-- **\[wip\]** when you start to re-edit your work, mark it so no one will accidentally merge it in meantime.
+- **[wip]** when you start to re-edit your work, mark it so no one will accidentally merge it in meantime.

### Question & Answer

2 changes: 1 addition & 1 deletion .github/workflows/README.md
@@ -16,7 +16,7 @@ Brief description of all our automation tools used for boosting development perf
| .azure-pipelines/gpu-benchmarks.yml | Run speed/memory benchmarks for parity with vanila PyTorch. | GPU |
| .github/workflows/ci-flagship-apps.yml | Run end-2-end tests with full applications, including deployment to the production cloud. | CPU |
| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific, standalone and slow tests. | CPU |
-| .github/workflows/tpu-tests.yml | Run only TPU-specific tests. Requires that the PR title contains '\[TPU\]' | TPU |
+| .github/workflows/tpu-tests.yml | Run only TPU-specific tests. Requires that the PR title contains '[TPU]' | TPU |

\* Each standalone test needs to be run in separate processes to avoid unwanted interactions between test cases.

8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -23,7 +23,7 @@ ci:

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
-rev: v4.6.0
+rev: v5.0.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
@@ -65,12 +65,12 @@ repos:
args: ["--in-place"]

- repo: https://github.com/sphinx-contrib/sphinx-lint
-rev: v0.9.1
+rev: v1.0.0
hooks:
- id: sphinx-lint

- repo: https://github.com/astral-sh/ruff-pre-commit
-rev: v0.5.0
+rev: v0.8.6
hooks:
# try to fix what is possible
- id: ruff
@@ -81,7 +81,7 @@ repos:
- id: ruff

- repo: https://github.com/executablebooks/mdformat
-rev: 0.7.17
+rev: 0.7.21
hooks:
- id: mdformat
additional_dependencies:
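The four `rev:` bumps above are the automated pre-commit.ci suggestions; the remaining file changes in this commit appear to be the re-formatting those newer hooks apply. A minimal sketch of reproducing the same autofix pass locally, assuming `pre-commit` is installed in the environment (the `run_autofix` helper is illustrative, not part of this commit):

```python
# Minimal sketch: reproduce the pre-commit.ci autofix pass locally.
# Assumes `pre-commit` is installed (e.g. `pip install pre-commit`);
# run_autofix() is an illustrative helper, not part of this commit.
import subprocess


def run_autofix() -> None:
    # Bump each hook's `rev:` in .pre-commit-config.yaml to its latest tag,
    # mirroring the version bumps shown in this file's diff.
    subprocess.run(["pre-commit", "autoupdate"], check=True)
    # Run every configured hook (ruff, mdformat, sphinx-lint, ...) over all
    # files, letting them rewrite files in place where they can. This call
    # exits non-zero when hooks modify files, so it is not checked.
    subprocess.run(["pre-commit", "run", "--all-files"], check=False)


if __name__ == "__main__":
    run_autofix()
```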
3 changes: 2 additions & 1 deletion examples/fabric/build_your_own_trainer/run.py
@@ -1,8 +1,9 @@
-import lightning as L
import torch
from torchmetrics.functional.classification.accuracy import accuracy
from trainer import MyCustomTrainer

+import lightning as L
+

class MNISTModule(L.LightningModule):
def __init__(self) -> None:
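The `run.py` change above is the pattern repeated across most of the example files below: after the ruff bump, isort-style sorting places `lightning` imports in a block of their own after the other third-party imports, presumably because the repository's ruff configuration treats `lightning` as a separate import section. A minimal sketch of the resulting layout; the body of `main()` is illustrative only:

```python
# Import layout after the ruff/isort bump: standard library first,
# then third-party packages, then `lightning` in a block of its own.
import os

import torch

import lightning as L


def main() -> None:
    # Trivial usage so each imported module above is exercised.
    fabric = L.Fabric(accelerator="cpu", devices=1)
    fabric.print(f"pid={os.getpid()}, torch={torch.__version__}")


if __name__ == "__main__":
    main()
```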
7 changes: 4 additions & 3 deletions examples/fabric/build_your_own_trainer/trainer.py
@@ -3,15 +3,16 @@
from functools import partial
from typing import Any, Literal, Optional, Union, cast

-import lightning as L
import torch
+from lightning_utilities import apply_to_collection
+from tqdm import tqdm
+
+import lightning as L
from lightning.fabric.accelerators import Accelerator
from lightning.fabric.loggers import Logger
from lightning.fabric.strategies import Strategy
from lightning.fabric.wrappers import _unwrap_objects
from lightning.pytorch.utilities.model_helpers import is_overridden
-from lightning_utilities import apply_to_collection
-from tqdm import tqdm


class MyCustomTrainer:
3 changes: 2 additions & 1 deletion examples/fabric/dcgan/train_fabric.py
@@ -16,9 +16,10 @@
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils
-from lightning.fabric import Fabric, seed_everything
from torchvision.datasets import CelebA

+from lightning.fabric import Fabric, seed_everything
+
# Root directory for dataset
dataroot = "data/"
# Number of workers for dataloader
7 changes: 4 additions & 3 deletions examples/fabric/fp8_distributed_transformer/train.py
@@ -1,15 +1,16 @@
-import lightning as L
import torch
import torch.nn as nn
import torch.nn.functional as F
-from lightning.fabric.strategies import ModelParallelStrategy
-from lightning.pytorch.demos import Transformer, WikiText2
from torch.distributed._composable.fsdp.fully_shard import fully_shard
from torch.distributed.device_mesh import DeviceMesh
from torch.utils.data import DataLoader
from torchao.float8 import Float8LinearConfig, convert_to_float8_training
from tqdm import tqdm

+import lightning as L
+from lightning.fabric.strategies import ModelParallelStrategy
+from lightning.pytorch.demos import Transformer, WikiText2
+

def configure_model(model: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
float8_config = Float8LinearConfig(
3 changes: 2 additions & 1 deletion examples/fabric/image_classifier/train_fabric.py
@@ -36,11 +36,12 @@
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
-from lightning.fabric import Fabric, seed_everything
from torch.optim.lr_scheduler import StepLR
from torchmetrics.classification import Accuracy
from torchvision.datasets import MNIST

+from lightning.fabric import Fabric, seed_everything
+
DATASETS_PATH = path.join(path.dirname(__file__), "..", "..", "..", "Datasets")


3 changes: 2 additions & 1 deletion examples/fabric/kfold_cv/train_fabric.py
@@ -20,12 +20,13 @@
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
-from lightning.fabric import Fabric, seed_everything
from sklearn import model_selection
from torch.utils.data import DataLoader, SubsetRandomSampler
from torchmetrics.classification import Accuracy
from torchvision.datasets import MNIST

+from lightning.fabric import Fabric, seed_everything
+
DATASETS_PATH = path.join(path.dirname(__file__), "..", "..", "..", "Datasets")


5 changes: 3 additions & 2 deletions examples/fabric/language_model/train.py
@@ -1,9 +1,10 @@
-import lightning as L
import torch
import torch.nn.functional as F
-from lightning.pytorch.demos import Transformer, WikiText2
from torch.utils.data import DataLoader, random_split

+import lightning as L
+from lightning.pytorch.demos import Transformer, WikiText2
+

def main():
L.seed_everything(42)
1 change: 1 addition & 0 deletions examples/fabric/meta_learning/train_fabric.py
@@ -18,6 +18,7 @@
import cherry
import learn2learn as l2l
import torch
+
from lightning.fabric import Fabric, seed_everything


2 changes: 1 addition & 1 deletion examples/fabric/reinforcement_learning/rl/agent.py
@@ -3,11 +3,11 @@
import gymnasium as gym
import torch
import torch.nn.functional as F
-from lightning.pytorch import LightningModule
from torch import Tensor
from torch.distributions import Categorical
from torchmetrics import MeanMetric

+from lightning.pytorch import LightningModule
from rl.loss import entropy_loss, policy_loss, value_loss
from rl.utils import layer_init

5 changes: 3 additions & 2 deletions examples/fabric/reinforcement_learning/train_fabric.py
@@ -25,13 +25,14 @@
import gymnasium as gym
import torch
import torchmetrics
-from lightning.fabric import Fabric
-from lightning.fabric.loggers import TensorBoardLogger
from rl.agent import PPOLightningAgent
from rl.utils import linear_annealing, make_env, parse_args, test
from torch import Tensor
from torch.utils.data import BatchSampler, DistributedSampler, RandomSampler

+from lightning.fabric import Fabric
+from lightning.fabric.loggers import TensorBoardLogger
+

def train(
fabric: Fabric,
11 changes: 6 additions & 5 deletions examples/fabric/reinforcement_learning/train_fabric_decoupled.py
@@ -25,17 +25,18 @@

import gymnasium as gym
import torch
-from lightning.fabric import Fabric
-from lightning.fabric.loggers import TensorBoardLogger
-from lightning.fabric.plugins.collectives import TorchCollective
-from lightning.fabric.plugins.collectives.collective import CollectibleGroup
-from lightning.fabric.strategies import DDPStrategy
from rl.agent import PPOLightningAgent
from rl.utils import linear_annealing, make_env, parse_args, test
from torch.distributed.algorithms.join import Join
from torch.utils.data import BatchSampler, DistributedSampler, RandomSampler
from torchmetrics import MeanMetric

+from lightning.fabric import Fabric
+from lightning.fabric.loggers import TensorBoardLogger
+from lightning.fabric.plugins.collectives import TorchCollective
+from lightning.fabric.plugins.collectives.collective import CollectibleGroup
+from lightning.fabric.strategies import DDPStrategy
+

@torch.no_grad()
def player(args, world_collective: TorchCollective, player_trainer_collective: TorchCollective):
2 changes: 1 addition & 1 deletion examples/fabric/tensor_parallel/README.md
@@ -41,5 +41,5 @@ Training successfully completed!
Peak memory usage: 17.95 GB
```

-> \[!NOTE\]
+> [!NOTE]
> The `ModelParallelStrategy` is experimental and subject to change. Report issues on [GitHub](https://github.com/Lightning-AI/pytorch-lightning/issues).
5 changes: 3 additions & 2 deletions examples/fabric/tensor_parallel/train.py
@@ -1,13 +1,14 @@
-import lightning as L
import torch
import torch.nn.functional as F
from data import RandomTokenDataset
-from lightning.fabric.strategies import ModelParallelStrategy
from model import ModelArgs, Transformer
from parallelism import parallelize
from torch.distributed.tensor.parallel import loss_parallel
from torch.utils.data import DataLoader

+import lightning as L
+from lightning.fabric.strategies import ModelParallelStrategy
+

def train():
strategy = ModelParallelStrategy(
5 changes: 3 additions & 2 deletions examples/pytorch/basics/autoencoder.py
@@ -22,13 +22,14 @@

import torch
import torch.nn.functional as F
+from torch import nn
+from torch.utils.data import DataLoader, random_split
+
from lightning.pytorch import LightningDataModule, LightningModule, Trainer, callbacks, cli_lightning_logo
from lightning.pytorch.cli import LightningCLI
from lightning.pytorch.demos.mnist_datamodule import MNIST
from lightning.pytorch.utilities import rank_zero_only
from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE
-from torch import nn
-from torch.utils.data import DataLoader, random_split

if _TORCHVISION_AVAILABLE:
import torchvision
5 changes: 3 additions & 2 deletions examples/pytorch/basics/backbone_image_classifier.py
@@ -21,12 +21,13 @@
from typing import Optional

import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader, random_split
+
from lightning.pytorch import LightningDataModule, LightningModule, cli_lightning_logo
from lightning.pytorch.cli import LightningCLI
from lightning.pytorch.demos.mnist_datamodule import MNIST
from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE
-from torch.nn import functional as F
-from torch.utils.data import DataLoader, random_split

if _TORCHVISION_AVAILABLE:
from torchvision import transforms
1 change: 1 addition & 0 deletions examples/pytorch/basics/profiler_example.py
@@ -28,6 +28,7 @@
import torch
import torchvision
import torchvision.transforms as T
+
from lightning.pytorch import LightningDataModule, LightningModule, cli_lightning_logo
from lightning.pytorch.cli import LightningCLI
from lightning.pytorch.profilers.pytorch import PyTorchProfiler
5 changes: 3 additions & 2 deletions examples/pytorch/basics/transformer.py
@@ -1,9 +1,10 @@
-import lightning as L
import torch
import torch.nn.functional as F
-from lightning.pytorch.demos import Transformer, WikiText2
from torch.utils.data import DataLoader, random_split

+import lightning as L
+from lightning.pytorch.demos import Transformer, WikiText2
+

class LanguageModel(L.LightningModule):
def __init__(self, vocab_size):
3 changes: 2 additions & 1 deletion examples/pytorch/bug_report/bug_report_model.py
@@ -1,9 +1,10 @@
import os

import torch
-from lightning.pytorch import LightningModule, Trainer
from torch.utils.data import DataLoader, Dataset

+from lightning.pytorch import LightningModule, Trainer
+

class RandomDataset(Dataset):
def __init__(self, size, length):
11 changes: 6 additions & 5 deletions examples/pytorch/domain_templates/computer_vision_fine_tuning.py
@@ -46,11 +46,6 @@

import torch
import torch.nn.functional as F
-from lightning.pytorch import LightningDataModule, LightningModule, cli_lightning_logo
-from lightning.pytorch.callbacks.finetuning import BaseFinetuning
-from lightning.pytorch.cli import LightningCLI
-from lightning.pytorch.utilities import rank_zero_info
-from lightning.pytorch.utilities.model_helpers import get_torchvision_model
from torch import nn, optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.optimizer import Optimizer
@@ -60,6 +55,12 @@
from torchvision.datasets import ImageFolder
from torchvision.datasets.utils import download_and_extract_archive

+from lightning.pytorch import LightningDataModule, LightningModule, cli_lightning_logo
+from lightning.pytorch.callbacks.finetuning import BaseFinetuning
+from lightning.pytorch.cli import LightningCLI
+from lightning.pytorch.utilities import rank_zero_info
+from lightning.pytorch.utilities.model_helpers import get_torchvision_model
+
log = logging.getLogger(__name__)
DATA_URL = "https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip"

1 change: 1 addition & 0 deletions
Expand Up @@ -25,6 +25,7 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
+
from lightning.pytorch import cli_lightning_logo
from lightning.pytorch.core import LightningModule
from lightning.pytorch.demos.mnist_datamodule import MNISTDataModule
5 changes: 3 additions & 2 deletions examples/pytorch/domain_templates/imagenet.py
@@ -43,13 +43,14 @@
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
+from torch.utils.data import Dataset
+from torchmetrics import Accuracy
+
from lightning.pytorch import LightningModule
from lightning.pytorch.callbacks import ModelCheckpoint, TQDMProgressBar
from lightning.pytorch.cli import LightningCLI
from lightning.pytorch.strategies import ParallelStrategy
from lightning.pytorch.utilities.model_helpers import get_torchvision_model
-from torch.utils.data import Dataset
-from torchmetrics import Accuracy


class ImageNetLightningModel(LightningModule):
3 changes: 2 additions & 1 deletion examples/pytorch/domain_templates/reinforce_learn_Qnet.py
@@ -41,11 +41,12 @@
import torch
import torch.nn as nn
import torch.optim as optim
-from lightning.pytorch import LightningModule, Trainer, cli_lightning_logo, seed_everything
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torch.utils.data.dataset import IterableDataset

+from lightning.pytorch import LightningModule, Trainer, cli_lightning_logo, seed_everything
+

class DQN(nn.Module):
"""Simple MLP network.
3 changes: 2 additions & 1 deletion examples/pytorch/domain_templates/reinforce_learn_ppo.py
@@ -35,12 +35,13 @@

import gym
import torch
-from lightning.pytorch import LightningModule, Trainer, cli_lightning_logo, seed_everything
from torch import nn
from torch.distributions import Categorical, Normal
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, IterableDataset

+from lightning.pytorch import LightningModule, Trainer, cli_lightning_logo, seed_everything
+

def create_mlp(input_shape: tuple[int], n_actions: int, hidden_size: int = 128):
"""Simple Multi-Layer Perceptron network."""