
Module

Base module to train CTC models.
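
The source file is thunder/module.py, so the class documented below can be imported as follows (the import path is inferred from that source location):

from thunder.module import BaseCTCModule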

BaseCTCModule (LightningModule)

Source code in thunder/module.py
class BaseCTCModule(pl.LightningModule):
    def __init__(
        self,
        encoder: nn.Module,
        decoder: nn.Module,
        audio_transform: nn.Module,
        text_transform: BatchTextTransformer,
        optimizer_class: OptimizerBuilderType = torch.optim.AdamW,
        optimizer_kwargs: Dict = None,
        lr_scheduler_class: SchedulerBuilderType = None,
        lr_scheduler_kwargs: Dict = None,
        encoder_final_dimension: int = None,
    ):
        """Base module for all systems that follow the same CTC training procedure.

        Args:
            encoder: Encoder part of the model
            decoder: Decoder part of the model
            audio_transform: Transforms raw audio into the features the encoder expects
            text_transform: Class that encodes and decodes all textual representations
            optimizer_class: Optimizer to use during training.
            optimizer_kwargs: Optional extra kwargs to the optimizer.
            lr_scheduler_class: Optional class to use a learning rate scheduler with the optimizer.
            lr_scheduler_kwargs: Optional extra kwargs to the learning rate scheduler.
            encoder_final_dimension: number of features in the encoder output.
        """
        super().__init__()

        self.encoder = encoder
        self.decoder = decoder
        self.audio_transform = audio_transform
        self.text_transform = text_transform

        self.optimizer_class = optimizer_class
        self.optimizer_kwargs = optimizer_kwargs or {}
        self.lr_scheduler_class = lr_scheduler_class
        self.lr_scheduler_kwargs = lr_scheduler_kwargs or {}
        self.lr_scheduler_interval = self.lr_scheduler_kwargs.pop("interval", "step")

        self.encoder_final_dimension = encoder_final_dimension

        # Metrics
        self.validation_cer = CharErrorRate()
        self.validation_wer = WordErrorRate()
        self.example_input_array = (
            torch.randn((10, 16000)),
            torch.randint(100, 16000, (10,)),
        )

    def forward(self, x: Tensor, lengths: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
        """Process the audio tensor to create the predictions.

        Args:
            x: Audio tensor of shape [batch_size, time]
            lengths: corresponding length of each element in the input tensor.

        Returns:
            Tensor with the predictions and the corresponding output lengths.
        """
        features, feature_lengths = self.audio_transform(x, lengths)
        encoded, out_lengths = self.encoder(features, feature_lengths)
        return self.decoder(encoded), out_lengths

    @torch.jit.export
    def predict(self, x: Tensor) -> List[str]:
        """Use this function during inference to predict.

        Args:
            x: Audio tensor of shape [batch_size, time]

        Returns:
            A list of strings, each one containing the transcription of the corresponding batch element.
        """
        audio_lengths = torch.tensor(x.shape[0] * [x.shape[-1]], device=x.device)
        pred, _ = self(x, audio_lengths)
        return self.text_transform.decode_prediction(pred.argmax(1))

    def training_step(
        self, batch: Tuple[torch.Tensor, torch.Tensor, List[str]], batch_idx: int
    ) -> torch.Tensor:
        """Training step. Check the original lightning docs for more information.

        Args:
            batch: Tuple containing the batched audios, lengths and the corresponding text labels.
            batch_idx: Batch index

        Returns:
            Training loss for that batch
        """
        audio, audio_lengths, texts = batch
        y, y_lengths = self.text_transform.encode(texts, device=self.device)

        probabilities, prob_lengths = self(audio, audio_lengths)
        loss = calculate_ctc(
            probabilities,
            y,
            prob_lengths,
            y_lengths,
            self.text_transform.vocab.blank_idx,
        )

        self.log("loss/train_loss", loss)
        return loss

    def validation_step(
        self, batch: Tuple[torch.Tensor, torch.Tensor, List[str]], batch_idx: int
    ) -> torch.Tensor:
        """Validation step. Check the original lightning docs for more information.

        Args:
            batch: Tuple containing the batched audios, lengths and the corresponding text labels.
            batch_idx: Batch index

        Returns:
            Validation loss for that batch
        """
        audio, audio_lengths, texts = batch
        y, y_lengths = self.text_transform.encode(texts, device=self.device)

        probabilities, prob_lengths = self(audio, audio_lengths)
        loss = calculate_ctc(
            probabilities,
            y,
            prob_lengths,
            y_lengths,
            self.text_transform.vocab.blank_idx,
        )

        decoded_preds = self.text_transform.decode_prediction(probabilities.argmax(1))
        decoded_targets = self.text_transform.decode_prediction(
            y, remove_repeated=False
        )
        self.validation_cer(decoded_preds, decoded_targets)
        self.validation_wer(decoded_preds, decoded_targets)

        self.log("loss/val_loss", loss)
        self.log("metrics/cer", self.validation_cer, on_epoch=True)
        self.log("metrics/wer", self.validation_wer, on_epoch=True)
        return loss

    def _update_special_optimizer_arg(self, original_kwargs: Dict) -> Dict:
        updated_kwargs = original_kwargs.copy()

        total_steps_arg = updated_kwargs.pop("total_steps_arg", None)
        if total_steps_arg:
            updated_kwargs[total_steps_arg] = self.trainer.estimated_stepping_batches
        return updated_kwargs

    def configure_optimizers(self) -> Union[torch.optim.Optimizer, Dict[str, Any]]:
        optim_kwargs = self._update_special_optimizer_arg(self.optimizer_kwargs)
        optimizer = self.optimizer_class(
            filter(lambda p: p.requires_grad, self.parameters()), **optim_kwargs
        )
        if not self.lr_scheduler_class:
            return optimizer

        scheduler_kwargs = self._update_special_optimizer_arg(self.lr_scheduler_kwargs)
        lr_scheduler = self.lr_scheduler_class(optimizer, **scheduler_kwargs)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_scheduler,
                "interval": self.lr_scheduler_interval,
            },
        }

__init__(self, encoder, decoder, audio_transform, text_transform, optimizer_class=torch.optim.AdamW, optimizer_kwargs=None, lr_scheduler_class=None, lr_scheduler_kwargs=None, encoder_final_dimension=None) special

Base module for all systems that follow the same CTC training procedure.

Parameters:

encoder : Module (required)
    Encoder part of the model.
decoder : Module (required)
    Decoder part of the model.
audio_transform : Module (required)
    Transforms raw audio into the features the encoder expects.
text_transform : BatchTextTransformer (required)
    Class that encodes and decodes all textual representations.
optimizer_class : Union[Type[torch.optim.optimizer.Optimizer], Callable[..., torch.optim.optimizer.Optimizer]] (default: torch.optim.AdamW)
    Optimizer to use during training.
optimizer_kwargs : Dict (default: None)
    Optional extra kwargs to the optimizer.
lr_scheduler_class : Union[Type[torch.optim.lr_scheduler._LRScheduler], Type[torch.optim.lr_scheduler.ReduceLROnPlateau], Callable[..., Union[torch.optim.lr_scheduler._LRScheduler, torch.optim.lr_scheduler.ReduceLROnPlateau]]] (default: None)
    Optional class to use a learning rate scheduler with the optimizer.
lr_scheduler_kwargs : Dict (default: None)
    Optional extra kwargs to the learning rate scheduler.
encoder_final_dimension : int (default: None)
    Number of features in the encoder output.
Source code in thunder/module.py
def __init__(
    self,
    encoder: nn.Module,
    decoder: nn.Module,
    audio_transform: nn.Module,
    text_transform: BatchTextTransformer,
    optimizer_class: OptimizerBuilderType = torch.optim.AdamW,
    optimizer_kwargs: Dict = None,
    lr_scheduler_class: SchedulerBuilderType = None,
    lr_scheduler_kwargs: Dict = None,
    encoder_final_dimension: int = None,
):
    """Base module for all systems that follow the same CTC training procedure.

    Args:
        encoder: Encoder part of the model
        decoder: Decoder part of the model
        audio_transform: Transforms raw audio into the features the encoder expects
        text_transform: Class that encodes and decodes all textual representations
        optimizer_class: Optimizer to use during training.
        optimizer_kwargs: Optional extra kwargs to the optimizer.
        lr_scheduler_class: Optional class to use a learning rate scheduler with the optimizer.
        lr_scheduler_kwargs: Optional extra kwargs to the learning rate scheduler.
        encoder_final_dimension: number of features in the encoder output.
    """
    super().__init__()

    self.encoder = encoder
    self.decoder = decoder
    self.audio_transform = audio_transform
    self.text_transform = text_transform

    self.optimizer_class = optimizer_class
    self.optimizer_kwargs = optimizer_kwargs or {}
    self.lr_scheduler_class = lr_scheduler_class
    self.lr_scheduler_kwargs = lr_scheduler_kwargs or {}
    self.lr_scheduler_interval = self.lr_scheduler_kwargs.pop("interval", "step")

    self.encoder_final_dimension = encoder_final_dimension

    # Metrics
    self.validation_cer = CharErrorRate()
    self.validation_wer = WordErrorRate()
    self.example_input_array = (
        torch.randn((10, 16000)),
        torch.randint(100, 16000, (10,)),
    )
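
A construction sketch follows; the encoder, decoder, and transform objects below are hypothetical placeholders (any nn.Module whose call signature matches the use in forward works), and building the BatchTextTransformer is not covered on this page:

import torch

from thunder.module import BaseCTCModule

module = BaseCTCModule(
    encoder=my_encoder,                  # placeholder: (features, lengths) -> (encoded, lengths)
    decoder=my_decoder,                  # placeholder: maps encoded features to vocab logits
    audio_transform=my_audio_transform,  # placeholder: (audio, lengths) -> (features, lengths)
    text_transform=my_text_transform,    # placeholder: a BatchTextTransformer instance
    optimizer_class=torch.optim.AdamW,
    optimizer_kwargs={"lr": 3e-4},       # illustrative learning rate
)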

configure_optimizers(self)

Choose what optimizers and learning-rate schedulers to use in your optimization. Normally you'd need one, but in the case of GANs or similar you might have multiple.

Returns:

Union[torch.optim.optimizer.Optimizer, Dict[str, Any]]
    Any of these 6 options.

  • Single optimizer.
  • List or Tuple of optimizers.
  • Two lists - The first list has multiple optimizers, and the second has multiple LR schedulers (or multiple lr_scheduler_config).
  • Dictionary, with an "optimizer" key, and (optionally) a "lr_scheduler" key whose value is a single LR scheduler or lr_scheduler_config.
  • Tuple of dictionaries as described above, with an optional "frequency" key.
  • None - Fit will run without any optimizer.

The lr_scheduler_config is a dictionary which contains the scheduler and its associated configuration. The default configuration is shown below.

lr_scheduler_config = {
    # REQUIRED: The scheduler instance
    "scheduler": lr_scheduler,
    # The unit of the scheduler's step size, could also be 'step'.
    # 'epoch' updates the scheduler on epoch end whereas 'step'
    # updates it after an optimizer update.
    "interval": "epoch",
    # How many epochs/steps should pass between calls to
    # `scheduler.step()`. 1 corresponds to updating the learning
    # rate after every epoch/step.
    "frequency": 1,
    # Metric to monitor for schedulers like `ReduceLROnPlateau`
    "monitor": "val_loss",
    # If set to `True`, will enforce that the value specified 'monitor'
    # is available when the scheduler is updated, thus stopping
    # training if not found. If set to `False`, it will only produce a warning
    "strict": True,
    # If using the `LearningRateMonitor` callback to monitor the
    # learning rate progress, this keyword can be used to specify
    # a custom logged name
    "name": None,
}

When there are schedulers in which the .step() method is conditioned on a value, such as the torch.optim.lr_scheduler.ReduceLROnPlateau scheduler, Lightning requires that the lr_scheduler_config contains the keyword "monitor" set to the metric name that the scheduler should be conditioned on.

# The ReduceLROnPlateau scheduler requires a monitor
def configure_optimizers(self):
    optimizer = Adam(...)
    return {
        "optimizer": optimizer,
        "lr_scheduler": {
            "scheduler": ReduceLROnPlateau(optimizer, ...),
            "monitor": "metric_to_track",
            "frequency": "indicates how often the metric is updated"
            # If "monitor" references validation metrics, then "frequency" should be set to a
            # multiple of "trainer.check_val_every_n_epoch".
        },
    }


# In the case of two optimizers, only one using the ReduceLROnPlateau scheduler
def configure_optimizers(self):
    optimizer1 = Adam(...)
    optimizer2 = SGD(...)
    scheduler1 = ReduceLROnPlateau(optimizer1, ...)
    scheduler2 = LambdaLR(optimizer2, ...)
    return (
        {
            "optimizer": optimizer1,
            "lr_scheduler": {
                "scheduler": scheduler1,
                "monitor": "metric_to_track",
            },
        },
        {"optimizer": optimizer2, "lr_scheduler": scheduler2},
    )

Metrics can be made available to monitor by simply logging them using self.log('metric_to_track', metric_val) in your LightningModule.
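
For example (a minimal sketch; metric_val stands for any value you compute):

def validation_step(self, batch, batch_idx):
    metric_val = ...  # compute the value to track
    self.log("metric_to_track", metric_val)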

Note

The frequency value specified in a dict along with the optimizer key is an int corresponding to the number of sequential batches optimized with the specific optimizer. It should be given to none or to all of the optimizers. There is a difference between passing multiple optimizers in a list, and passing multiple optimizers in dictionaries with a frequency of 1:

- In the former case, all optimizers will operate on the given batch in each optimization step.
- In the latter, only one optimizer will operate on the given batch at every step.

This is different from the frequency value specified in the lr_scheduler_config mentioned above.

def configure_optimizers(self):
    optimizer_one = torch.optim.SGD(self.model.parameters(), lr=0.01)
    optimizer_two = torch.optim.SGD(self.model.parameters(), lr=0.01)
    return [
        {"optimizer": optimizer_one, "frequency": 5},
        {"optimizer": optimizer_two, "frequency": 10},
    ]

In this example, the first optimizer will be used for the first 5 steps, the second optimizer for the next 10 steps and that cycle will continue. If an LR scheduler is specified for an optimizer using the lr_scheduler key in the above dict, the scheduler will only be updated when its optimizer is being used.

Examples:

# most cases. no learning rate scheduler
def configure_optimizers(self):
    return Adam(self.parameters(), lr=1e-3)

# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
    gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
    dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
    return gen_opt, dis_opt

# example with learning rate schedulers
def configure_optimizers(self):
    gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
    dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
    dis_sch = CosineAnnealingLR(dis_opt, T_max=10)
    return [gen_opt, dis_opt], [dis_sch]

# example with step-based learning rate schedulers
# each optimizer has its own scheduler
def configure_optimizers(self):
    gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
    dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
    gen_sch = {
        'scheduler': ExponentialLR(gen_opt, 0.99),
        'interval': 'step'  # called after each training step
    }
    dis_sch = CosineAnnealingLR(dis_opt, T_max=10) # called every epoch
    return [gen_opt, dis_opt], [gen_sch, dis_sch]

# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
    gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
    dis_opt = Adam(self.model_dis.parameters(), lr=0.02)
    n_critic = 5
    return (
        {'optimizer': dis_opt, 'frequency': n_critic},
        {'optimizer': gen_opt, 'frequency': 1}
    )

Note

Some things to know:

  • Lightning calls .backward() and .step() on each optimizer as needed.
  • If a learning rate scheduler is specified in configure_optimizers() with key "interval" (default "epoch") in the scheduler configuration, Lightning will call the scheduler's .step() method automatically in case of automatic optimization.
  • If you use 16-bit precision (precision=16), Lightning will automatically handle the optimizers.
  • If you use multiple optimizers, training_step will have an additional optimizer_idx parameter.
  • If you use torch.optim.LBFGS, Lightning handles the closure function automatically for you.
  • If you use multiple optimizers, gradients will be calculated only for the parameters of the current optimizer at each training step.
  • If you need to control how often those optimizers step or override the default .step() schedule, override the optimizer_step hook.
Source code in thunder/module.py
def configure_optimizers(self) -> Union[torch.optim.Optimizer, Dict[str, Any]]:
    optim_kwargs = self._update_special_optimizer_arg(self.optimizer_kwargs)
    optimizer = self.optimizer_class(
        filter(lambda p: p.requires_grad, self.parameters()), **optim_kwargs
    )
    if not self.lr_scheduler_class:
        return optimizer

    scheduler_kwargs = self._update_special_optimizer_arg(self.lr_scheduler_kwargs)
    lr_scheduler = self.lr_scheduler_class(optimizer, **scheduler_kwargs)
    return {
        "optimizer": optimizer,
        "lr_scheduler": {
            "scheduler": lr_scheduler,
            "interval": self.lr_scheduler_interval,
        },
    }
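
The _update_special_optimizer_arg helper above recognizes one special key: when optimizer_kwargs or lr_scheduler_kwargs contains "total_steps_arg", its value names the argument that will be filled in with trainer.estimated_stepping_batches. A sketch for a scheduler such as torch.optim.lr_scheduler.OneCycleLR, whose total_steps is only known once the trainer is configured (the max_lr value is a placeholder):

lr_scheduler_kwargs = {
    "max_lr": 1e-3,                    # placeholder value
    "interval": "step",                # popped in __init__ and forwarded to Lightning
    "total_steps_arg": "total_steps",  # filled with trainer.estimated_stepping_batches
}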

forward(self, x, lengths)

Process the audio tensor to create the predictions.

Parameters:

x : Tensor (required)
    Audio tensor of shape [batch_size, time]
lengths : Tensor (required)
    Corresponding length of each element in the input tensor.

Returns:

Tuple[torch.Tensor, Optional[torch.Tensor]]
    Tensor with the predictions and the corresponding output lengths.

Source code in thunder/module.py
def forward(self, x: Tensor, lengths: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
    """Process the audio tensor to create the predictions.

    Args:
        x: Audio tensor of shape [batch_size, time]
        lengths: corresponding length of each element in the input tensor.

    Returns:
        Tensor with the predictions and the corresponding output lengths.
    """
    features, feature_lengths = self.audio_transform(x, lengths)
    encoded, out_lengths = self.encoder(features, feature_lengths)
    return self.decoder(encoded), out_lengths
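
A call sketch, reusing the shapes of the example_input_array defined in __init__ (module is assumed to be a constructed BaseCTCModule):

waveforms = torch.randn(10, 16000)          # [batch_size, time]
lengths = torch.randint(100, 16000, (10,))  # valid samples per element
predictions, out_lengths = module(waveforms, lengths)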

predict(self, x)

Use this function during inference to predict.

Parameters:

x : Tensor (required)
    Audio tensor of shape [batch_size, time]

Returns:

List[str]
    A list of strings, each one containing the transcription of the corresponding batch element.

Source code in thunder/module.py
@torch.jit.export
def predict(self, x: Tensor) -> List[str]:
    """Use this function during inference to predict.

    Args:
        x: Audio tensor of shape [batch_size, time]

    Returns:
        A list of strings, each one containing the transcription of the corresponding batch element.
    """
    audio_lengths = torch.tensor(x.shape[0] * [x.shape[-1]], device=x.device)
    pred, _ = self(x, audio_lengths)
    return self.text_transform.decode_prediction(pred.argmax(1))
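
An inference sketch: predict derives the lengths from the last tensor dimension, so only the batched audio is needed (module is assumed to be a constructed BaseCTCModule):

waveforms = torch.randn(2, 16000)           # two clips of 16000 samples each
transcriptions = module.predict(waveforms)  # one string per batch element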

training_step(self, batch, batch_idx)

Training step. Check the original lightning docs for more information.

Parameters:

batch : Tuple[torch.Tensor, torch.Tensor, List[str]] (required)
    Tuple containing the batched audios, lengths and the corresponding text labels.
batch_idx : int (required)
    Batch index

Returns:

Tensor
    Training loss for that batch

Source code in thunder/module.py
def training_step(
    self, batch: Tuple[torch.Tensor, torch.Tensor, List[str]], batch_idx: int
) -> torch.Tensor:
    """Training step. Check the original lightning docs for more information.

    Args:
        batch: Tuple containing the batched audios, lengths and the corresponding text labels.
        batch_idx: Batch index

    Returns:
        Training loss for that batch
    """
    audio, audio_lengths, texts = batch
    y, y_lengths = self.text_transform.encode(texts, device=self.device)

    probabilities, prob_lengths = self(audio, audio_lengths)
    loss = calculate_ctc(
        probabilities,
        y,
        prob_lengths,
        y_lengths,
        self.text_transform.vocab.blank_idx,
    )

    self.log("loss/train_loss", loss)
    return loss
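
The expected batch layout matches the annotation above; a hand-built sketch (audio values and texts are placeholders):

audio = torch.randn(4, 16000)            # [batch_size, time]
audio_lengths = torch.full((4,), 16000)  # valid samples per element
texts = ["hello world"] * 4              # raw transcriptions
batch = (audio, audio_lengths, texts)    # Tuple[Tensor, Tensor, List[str]]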

validation_step(self, batch, batch_idx)

Validation step. Check the original lightning docs for more information.

Parameters:

batch : Tuple[torch.Tensor, torch.Tensor, List[str]] (required)
    Tuple containing the batched audios, lengths and the corresponding text labels.
batch_idx : int (required)
    Batch index

Returns:

Tensor
    Validation loss for that batch

Source code in thunder/module.py
def validation_step(
    self, batch: Tuple[torch.Tensor, torch.Tensor, List[str]], batch_idx: int
) -> torch.Tensor:
    """Validation step. Check the original lightning docs for more information.

    Args:
        batch: Tuple containing the batched audios, lengths and the corresponding text labels.
        batch_idx: Batch index

    Returns:
        Validation loss for that batch
    """
    audio, audio_lengths, texts = batch
    y, y_lengths = self.text_transform.encode(texts, device=self.device)

    probabilities, prob_lengths = self(audio, audio_lengths)
    loss = calculate_ctc(
        probabilities,
        y,
        prob_lengths,
        y_lengths,
        self.text_transform.vocab.blank_idx,
    )

    decoded_preds = self.text_transform.decode_prediction(probabilities.argmax(1))
    decoded_targets = self.text_transform.decode_prediction(
        y, remove_repeated=False
    )
    self.validation_cer(decoded_preds, decoded_targets)
    self.validation_wer(decoded_preds, decoded_targets)

    self.log("loss/val_loss", loss)
    self.log("metrics/cer", self.validation_cer, on_epoch=True)
    self.log("metrics/wer", self.validation_wer, on_epoch=True)
    return loss
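
Finally, a training sketch showing how the module plugs into a standard Lightning loop (module and datamodule are placeholders; the datamodule must yield the batch tuples described above):

import pytorch_lightning as pl

trainer = pl.Trainer(max_epochs=10)
trainer.fit(module, datamodule=datamodule)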