Skip to content

aligners

AlignerHook

Bases: BaseWrapperHook

Computes an alignment loss (e.g. MMD) based on features from two domains.

Source code in pytorch_adapt/hooks/aligners.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
class AlignerHook(BaseWrapperHook):
    """
    Computes an alignment loss (e.g. MMD) between features
    extracted from two domains.
    """

    def __init__(
        self,
        loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
        hook: BaseHook = None,
        layer: str = "features",
        **kwargs,
    ):
        """
        Arguments:
            loss_fn: distance function applied to a pair of tensors.
                Defaults to [```MMDLoss```][pytorch_adapt.layers.MMDLoss]
                when ```None```.
            hook: the hook used to compute features
            layer: the layer the loss is computed on. Must be
                either ```"features"``` or ```"logits"```.
        """

        super().__init__(**kwargs)
        self.loss_fn = c_f.default(loss_fn, MMDLoss, {})
        layer_to_hook = {
            "features": FeaturesHook,
            "logits": FeaturesAndLogitsHook,
        }
        if layer not in layer_to_hook:
            raise ValueError("AlignerHook layer must be 'features' or 'logits'")
        self.hook = c_f.default(hook, layer_to_hook[layer], {})
        self.layer = layer

    def call(self, inputs, losses):
        # Run the wrapped hook, then pick out the src/target entries for
        # the configured layer and measure the distance between them.
        outputs = self.hook(inputs, losses)[0]
        key_names = c_f.filter(
            self.hook.out_keys, f"_{self.layer}$", ["^src", "^target"]
        )
        [src, target] = c_f.extract([outputs, inputs], key_names)
        loss = self.loss_fn(src, target)
        return outputs, {self._loss_keys()[0]: loss}

    def _loss_keys(self):
        # Single loss name, parameterized by the aligned layer.
        return [f"{self.layer}_confusion_loss"]

__init__(loss_fn=None, hook=None, layer='features', **kwargs)

Parameters:

Name Type Description Default
loss_fn Callable[[torch.Tensor, torch.Tensor], torch.Tensor]

a function that computes a distance between two tensors. If None, it defaults to MMDLoss.

None
hook BaseHook

the hook for computing features

None
layer str

the layer for which the loss is computed. Must be either "features" or "logits".

'features'
Source code in pytorch_adapt/hooks/aligners.py
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
def __init__(
    self,
    loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
    hook: BaseHook = None,
    layer: str = "features",
    **kwargs,
):
    """
    Arguments:
        loss_fn: distance function applied to a pair of tensors.
            Defaults to [```MMDLoss```][pytorch_adapt.layers.MMDLoss]
            when ```None```.
        hook: the hook used to compute features
        layer: the layer the loss is computed on. Must be
            either ```"features"``` or ```"logits"```.
    """

    super().__init__(**kwargs)
    self.loss_fn = c_f.default(loss_fn, MMDLoss, {})
    layer_to_hook = {
        "features": FeaturesHook,
        "logits": FeaturesAndLogitsHook,
    }
    if layer not in layer_to_hook:
        raise ValueError("AlignerHook layer must be 'features' or 'logits'")
    self.hook = c_f.default(hook, layer_to_hook[layer], {})
    self.layer = layer

AlignerPlusCHook

Bases: BaseWrapperHook

Computes an alignment loss plus a classification loss, and then optimizes the models.

Source code in pytorch_adapt/hooks/aligners.py
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
class AlignerPlusCHook(BaseWrapperHook):
    """
    Computes an alignment loss plus a classification loss,
    and then optimizes the models.
    """

    def __init__(
        self,
        opts,
        weighter=None,
        reducer=None,
        loss_fn=None,
        aligner_hook=None,
        pre=None,
        post=None,
        softmax=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        pre, post = c_f.many_default([pre, post], [[], []])
        aligner = ManyAlignerHook(
            loss_fn=loss_fn, aligner_hook=aligner_hook, softmax=softmax
        )
        # Chain: user pre-hooks -> alignment -> classification -> user post-hooks,
        # wrapped in an optimizer step, followed by a loss summary.
        chained = ChainHook(*pre, aligner, CLossHook(), *post)
        optimized = OptimizerHook(chained, opts, weighter, reducer)
        summary = SummaryHook({"total_loss": optimized})
        self.hook = ChainHook(optimized, summary)

FeaturesLogitsAlignerHook

Bases: BaseWrapperHook

This chains together an AlignerHook for "features" followed by an AlignerHook for "logits".

Source code in pytorch_adapt/hooks/aligners.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
class FeaturesLogitsAlignerHook(BaseWrapperHook):
    """
    This chains together an
    [```AlignerHook```][pytorch_adapt.hooks.AlignerHook] for
    ```"features"``` followed by an ```AlignerHook``` for ```"logits"```.
    """

    def __init__(
        self,
        loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
        **kwargs,
    ):
        """
        Arguments:
            loss_fn: The loss used by both aligner hooks.
        """
        super().__init__(**kwargs)
        loss_fn = c_f.default(loss_fn, MMDLoss, {})
        # Same loss function is shared by both the feature and logit aligners.
        self.hook = ChainHook(
            AlignerHook(loss_fn, layer="features"),
            AlignerHook(loss_fn, layer="logits"),
        )

__init__(loss_fn=None, **kwargs)

Parameters:

Name Type Description Default
loss_fn Callable[[torch.Tensor, torch.Tensor], torch.Tensor]

The loss used by both aligner hooks.

None
Source code in pytorch_adapt/hooks/aligners.py
110
111
112
113
114
115
116
117
118
119
120
121
122
123
def __init__(
    self,
    loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
    **kwargs,
):
    """
    Arguments:
        loss_fn: The loss used by both aligner hooks.
    """
    super().__init__(**kwargs)
    loss_fn = c_f.default(loss_fn, MMDLoss, {})
    # Same loss function is shared by both the feature and logit aligners.
    self.hook = ChainHook(
        AlignerHook(loss_fn, layer="features"),
        AlignerHook(loss_fn, layer="logits"),
    )

JointAlignerHook

Bases: BaseWrapperHook

Computes a joint alignment loss (e.g. Joint MMD) based on multiple features from two domains.

The default setting is to use the features and logits from the source and target domains.

Source code in pytorch_adapt/hooks/aligners.py
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
class JointAlignerHook(BaseWrapperHook):
    """
    Computes a joint alignment loss (e.g. Joint MMD) based on
    multiple features from two domains.

    The default setting is to use the features and logits
    from the source and target domains.
    """

    def __init__(
        self,
        loss_fn: Callable[
            [List[torch.Tensor], List[torch.Tensor]], torch.Tensor
        ] = None,
        hook: BaseHook = None,
        **kwargs,
    ):
        """
        Arguments:
            loss_fn: distance function applied to two **lists**
                of tensors. Defaults to
                [```MMDLoss```][pytorch_adapt.layers.MMDLoss]
                when ```None```.
            hook: the hook for computing features and logits
        """
        super().__init__(**kwargs)
        self.loss_fn = c_f.default(loss_fn, MMDLoss, {})
        self.hook = c_f.default(hook, FeaturesAndLogitsHook, {})

    def call(self, inputs, losses):
        outputs = self.hook(inputs, losses)[0]
        src_feats = self.get_all_domain_features(inputs, outputs, "src")
        target_feats = self.get_all_domain_features(inputs, outputs, "target")
        loss = self.loss_fn(src_feats, target_feats)
        return outputs, {self._loss_keys()[0]: loss}

    def _loss_keys(self):
        return ["joint_confusion_loss"]

    def get_all_domain_features(self, inputs, outputs, domain):
        # Gather every output key for the given domain, sorted so the
        # tensor ordering passed to loss_fn is deterministic.
        keys = sorted(c_f.filter(self.hook.out_keys, f"^{domain}"))
        return c_f.extract([outputs, inputs], keys)

__init__(loss_fn=None, hook=None, **kwargs)

Parameters:

Name Type Description Default
loss_fn Callable[[List[torch.Tensor], List[torch.Tensor]], torch.Tensor]

a function that computes a distance between two lists of tensors. If None, it defaults to MMDLoss.

None
hook BaseHook

the hook for computing features and logits

None
Source code in pytorch_adapt/hooks/aligners.py
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
def __init__(
    self,
    loss_fn: Callable[
        [List[torch.Tensor], List[torch.Tensor]], torch.Tensor
    ] = None,
    hook: BaseHook = None,
    **kwargs,
):
    """
    Arguments:
        loss_fn: distance function applied to two **lists**
            of tensors. Defaults to
            [```MMDLoss```][pytorch_adapt.layers.MMDLoss]
            when ```None```.
        hook: the hook for computing features and logits
    """
    super().__init__(**kwargs)
    self.loss_fn = c_f.default(loss_fn, MMDLoss, {})
    self.hook = c_f.default(hook, FeaturesAndLogitsHook, {})