losses.py
    # coding=utf-8
    # Copyright 2020 The Google Research Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    """All functions related to loss computation and optimization.
    """
    
    import torch
    import torch.optim as optim
    import numpy as np
    from models import utils as mutils
    from sde_lib import VESDE
    
    import logging
    
    
    def get_optimizer(config, params):
      """Returns a PyTorch optimizer object based on `config`."""
      if config.optim.optimizer == 'Adam':
        optimizer = optim.Adam(params, lr=config.optim.lr, betas=(config.optim.beta1, 0.999), eps=config.optim.eps,
                               weight_decay=config.optim.weight_decay)
      else:
        raise NotImplementedError(
          f'Optimizer {config.optim.optimizer} not supported yet!')
    
      return optimizer
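
    # Illustrative only: the `config.optim` fields read by `get_optimizer` above,
    # with example values (`config` and `model` here are assumed stand-ins):
    #
    #   config.optim.optimizer = 'Adam'
    #   config.optim.lr = 2e-4
    #   config.optim.beta1 = 0.9
    #   config.optim.eps = 1e-8
    #   config.optim.weight_decay = 0.0
    #   optimizer = get_optimizer(config, model.parameters())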
    
    
    def optimization_manager(config):
      """Returns an `optimize_fn` with its hyperparameters bound to `config`."""

      def managed_optimize_fn(optimizer, params, step):
        optimize_fn(optimizer, params, step,
                    lr=config.optim.lr,
                    warmup=config.optim.warmup,
                    grad_clip=config.optim.grad_clip)

      return managed_optimize_fn
    
    
    def optimize_fn(optimizer, params, step, lr, warmup, grad_clip):
      """Optimizes with warmup and gradient clipping (disabled if negative)."""
      if warmup > 0:
        for g in optimizer.param_groups:
          g['lr'] = lr * np.minimum(step / warmup, 1.0)
      if grad_clip >= 0:
        torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)
      optimizer.step()
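
    # Illustrative only: with warmup=5000 (an example value), the learning rate ramps
    # linearly, so step 1000 uses 0.2 * lr and any step >= 5000 uses the full lr;
    # a negative grad_clip disables gradient clipping entirely.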
    
    
    def loss_fn(model, sde, batch, reduce_mean, train):
      """Compute the loss function.
    
      Args:
        model: A score model.
        sde: An `sde_lib.SDE` object that represents the forward SDE.
        batch: A mini-batch of training data.
        reduce_mean: If `True`, average the loss across data dimensions; otherwise sum them.
        train: `True` for the training loss, `False` for the evaluation loss.
    
      Returns:
        loss: A scalar that represents the average loss value across the mini-batch.
      """
    
      eps = 1e-5  # Smallest time value to sample; avoids numerical issues at t = 0.

      t = torch.rand(batch.shape[0], device=batch.device) * (sde.T - eps) + eps
      z = torch.randn_like(batch)
      mean, std = sde.marginal_prob(batch, t)
      perturbed_data = mean + std[:, None, None, None] * z
      score = mutils.score_fn(model, sde, perturbed_data, t, train)
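      # Since perturbed_data = mean + std * z, the score of the Gaussian perturbation
      # kernel is -z / std, so the residual (score * std + z) below equals std * (score - target);
      # squaring it gives denoising score matching with the lambda(t) = std(t)**2 weighting.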
    
      losses = torch.square(score * std[:, None, None, None] + z)
    
      reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
      losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)
    
      loss = torch.mean(losses)
      return loss
    
    
    def get_step_fn(sde, train, reduce_mean=False, continuous=True, likelihood_weighting=False):
      """Create a one-step training/evaluation function.

      Args:
        sde: An `sde_lib.SDE` object that represents the forward SDE.
        train: `True` to build a training step, `False` to build an evaluation step.
        reduce_mean: If `True`, average the loss across data dimensions. Otherwise sum the loss across data dimensions.
        continuous: `True` indicates that the model is defined to take continuous time steps.
        likelihood_weighting: If `True`, weight the mixture of score matching losses according to
          https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended by our paper.

      Returns:
        A one-step function for training or evaluation.
      """
      # `continuous` and `likelihood_weighting` are accepted for interface compatibility
      # but are not used by `loss_fn` above.

      def wrapped_step_fn(state, batch, config):
        return step_fn(state, sde, batch, config, train)

      return wrapped_step_fn
    
    def step_fn(state, sde, batch, config, train):
      """Runs one step of training or evaluation.

      Args:
        state: A dictionary of training information, containing the score model, optimizer,
          EMA status, and number of optimization steps.
        sde: An `sde_lib.SDE` object that represents the forward SDE.
        batch: A mini-batch of training/evaluation data.
        config: The run configuration; `config.training.reduce_mean` and the `config.optim.*`
          settings are read here.
        train: If `True`, perform a gradient update; otherwise evaluate the loss using the EMA weights.

      Returns:
        loss: The average loss value of this step.
      """
      model = state['model']
      if train:
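        # Training: compute the loss, backpropagate, apply the warmup/clipping-aware
        # update from `optimize_fn`, and advance the parameter EMA.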
        optimizer = state['optimizer']
        optimizer.zero_grad()
        loss = loss_fn(model, sde, batch, config.training.reduce_mean, train)
        loss.backward()
        optimize_fn(optimizer, model.parameters(), step=state['step'], lr=config.optim.lr, warmup=config.optim.warmup, grad_clip=config.optim.grad_clip)
        state['step'] += 1
        state['ema'].update(model.parameters())
      else:
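        # Evaluation: temporarily swap in the EMA parameters, compute the loss
        # without tracking gradients, then restore the original parameters.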
        with torch.no_grad():
          ema = state['ema']
          ema.store(model.parameters())
          ema.copy_to(model.parameters())
          loss = loss_fn(model, sde, batch, config.training.reduce_mean, train)
          ema.restore(model.parameters())
    
      return loss
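

    # Usage sketch (illustrative only): a minimal loop wiring the pieces above together.
    # The EMA helper name `ExponentialMovingAverage` and the factory `mutils.create_model`
    # are assumptions; only the `update`/`store`/`copy_to`/`restore` calls and the state
    # dictionary keys are taken from `step_fn` above.
    #
    #   from models.ema import ExponentialMovingAverage
    #
    #   model = mutils.create_model(config)
    #   state = dict(model=model,
    #                optimizer=get_optimizer(config, model.parameters()),
    #                ema=ExponentialMovingAverage(model.parameters(), decay=0.999),
    #                step=0)
    #   sde = VESDE()  # constructor arguments depend on the local sde_lib
    #   for batch in train_loader:
    #     loss = step_fn(state, sde, batch, config, train=True)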