

Optimizers

Bases: BaseContainer

A container for model optimizers.

Source code in pytorch_adapt/containers/optimizers.py
class Optimizers(BaseContainer):
    """
    A container for model optimizers.
    """

    def __init__(self, *args, multipliers=None, **kwargs):
        """
        Arguments:
            *args: [```BaseContainer```][pytorch_adapt.containers.BaseContainer] arguments.
            multipliers: A dictionary mapping from
                optimizer name to lr multiplier. Each
                optimizer will have ```lr = lr * multiplier```
                upon initialization. If ```None```,
                then multiplier is 1.
            **kwargs:  [```BaseContainer```][pytorch_adapt.containers.BaseContainer]
                keyword arguments.
        """
        self.multipliers = c_f.default(multipliers, {})
        super().__init__(*args, **kwargs)

    def _create_with(self, other):
        c_f.assert_keys_are_present_cls(self, "multipliers", self)
        for k, v in self.items():
            if c_f.is_optimizer(v):
                continue
            class_ref, kwargs = v
            model = other[k]
            if c_f.has_no_parameters(model):
                self[k] = DoNothingOptimizer()
            else:
                kwargs = copy.deepcopy(kwargs)
                kwargs["lr"] *= self.multipliers.get(k, 1)
                self[k] = class_ref(model.parameters(), **kwargs)

    def step(self):
        """
        Calls ```.step()``` on all optimizers.
        """
        for v in self.values():
            v.step()

    def zero_grad(self):
        """
        Calls ```.zero_grad()``` on all optimizers.
        """
        for v in self.values():
            v.zero_grad()

    def merge(self, other):
        super().merge(other)
        self.multipliers.update(other.multipliers)

    def zero_back_step(self, loss, keys: List[str] = None):
        """
        Zeros gradients, computes gradients, and updates model weights.
        Arguments:
            loss: The loss on which ```.backward()``` is called.
            keys: The subset of optimizers on which to call
                ```.zero_grad()``` and ```.step()```.
                If ```None```, then all optimizers are used.
        """
        keys = c_f.default(keys, self.keys())
        optimizers = [self[k] for k in keys]
        c_f.zero_back_step(loss, optimizers)
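
For orientation, here is a minimal usage sketch. It assumes a Models container from pytorch_adapt.containers, that create_with is the public method which ends up calling _create_with above, and that passing a single (optimizer class, kwargs) tuple applies it to every model; the model names "G" and "C" and the Adam settings are illustrative only.

import torch
from pytorch_adapt.containers import Models, Optimizers

# Two toy models; the keys are arbitrary names.
models = Models({
    "G": torch.nn.Linear(10, 10),
    "C": torch.nn.Linear(10, 2),
})

# Lazy specification: an (optimizer class, init kwargs) pair
# that will be instantiated later, once the models are known.
optimizers = Optimizers((torch.optim.Adam, {"lr": 1e-3}))

# Assumed public entry point that triggers _create_with, replacing each
# (class, kwargs) pair with a real optimizer bound to the matching
# model's parameters (or a DoNothingOptimizer if the model has none).
optimizers.create_with(models)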

__init__(*args, multipliers=None, **kwargs)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| *args | | BaseContainer arguments. | () |
| multipliers | | A dictionary mapping from optimizer name to lr multiplier. Each optimizer will have lr = lr * multiplier upon initialization. If None, then multiplier is 1. | None |
| **kwargs | | BaseContainer keyword arguments. | {} |
Source code in pytorch_adapt/containers/optimizers.py
def __init__(self, *args, multipliers=None, **kwargs):
    """
    Arguments:
        *args: [```BaseContainer```][pytorch_adapt.containers.BaseContainer] arguments.
        multipliers: A dictionary mapping from
            optimizer name to lr multiplier. Each
            optimizer will have ```lr = lr * multiplier```
            upon initialization. If ```None```,
            then multiplier is 1.
        **kwargs:  [```BaseContainer```][pytorch_adapt.containers.BaseContainer]
            keyword arguments.
    """
    self.multipliers = c_f.default(multipliers, {})
    super().__init__(*args, **kwargs)
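
The sketch below shows how multipliers is intended to work according to the docstring: the dictionary is keyed by optimizer name, and the stored lr is scaled by the multiplier when the optimizer is instantiated. The names "G" and "D", the use of Models, and the create_with call are assumptions carried over from the previous example.

import torch
from pytorch_adapt.containers import Models, Optimizers

models = Models({
    "G": torch.nn.Linear(10, 10),
    "D": torch.nn.Linear(10, 1),
})

# Both optimizers are specified with lr=1e-3, but "D" is scaled by 0.1,
# so it ends up with lr=1e-4 while "G" keeps lr=1e-3.
optimizers = Optimizers(
    (torch.optim.Adam, {"lr": 1e-3}),
    multipliers={"D": 0.1},
)
optimizers.create_with(models)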

step()

Calls .step() on all optimizers.

Source code in pytorch_adapt/containers/optimizers.py
def step(self):
    """
    Calls ```.step()``` on all optimizers.
    """
    for v in self.values():
        v.step()

zero_back_step(loss, keys=None)

Zeros gradients, computes gradients, and updates model weights.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| loss | | The loss on which .backward() is called. | required |
| keys | List[str] | The subset of optimizers on which to call .zero_grad() and .step(). If None, then all optimizers are used. | None |
Source code in pytorch_adapt/containers/optimizers.py
def zero_back_step(self, loss, keys: List[str] = None):
    """
    Zeros gradients, computes gradients, and updates model weights.
    Arguments:
        loss: The loss on which ```.backward()``` is called.
        keys: The subset of optimizers on which to call
            ```.zero_grad()``` and ```.step()```.
            If ```None```, then all optimizers are used.
    """
    keys = c_f.default(keys, self.keys())
    optimizers = [self[k] for k in keys]
    c_f.zero_back_step(loss, optimizers)
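
As a hedged illustration that continues the earlier sketch, a single training step restricted to a subset of optimizers could look like this; some_loss_fn, outputs, and labels are placeholders.

# One call performs zero_grad(), loss.backward(), and step(),
# but only for the optimizers named in keys. With keys=None
# (the default), every optimizer in the container is used.
loss = some_loss_fn(outputs, labels)  # placeholder loss computation
optimizers.zero_back_step(loss, keys=["G", "C"])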

zero_grad()

Calls .zero_grad() on all optimizers.

Source code in pytorch_adapt/containers/optimizers.py
def zero_grad(self):
    """
    Calls ```.zero_grad()``` on all optimizers.
    """
    for v in self.values():
        v.zero_grad()
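
Continuing the same sketch, zero_grad and step can also be called explicitly when you want the backward pass under your own control; this is a hedged illustration of the equivalent manual sequence, not a prescribed pattern.

optimizers.zero_grad()                 # clear gradients on every optimizer
loss = some_loss_fn(outputs, labels)   # placeholder loss computation
loss.backward()                        # compute gradients
optimizers.step()                      # update all model weights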