
stochastic_linear

StochasticLinear

Bases: nn.Module

Implementation of the stochastic layer from Stochastic Classifiers for Unsupervised Domain Adaptation. In train() mode, it uses weights and biases sampled from learned normal distributions. In eval() mode, the learned means are used.
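A minimal usage sketch, assuming pytorch_adapt is installed and that StochasticLinear is importable from pytorch_adapt.layers:

import torch
from pytorch_adapt.layers import StochasticLinear

fc = StochasticLinear(in_features=128, out_features=10)
x = torch.randn(32, 128)

fc.train()
y1 = fc(x)  # weights/biases sampled from the learned distributions
y2 = fc(x)  # a fresh sample, so y1 and y2 differ

fc.eval()
y3 = fc(x)  # deterministic: computed with the learned means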

Source code in pytorch_adapt/layers/stochastic_linear.py
import torch
from torch import nn

# c_f and reset_parameters_helper are pytorch_adapt helpers that are
# imported or defined above this excerpt in the actual source file


class StochasticLinear(nn.Module):
    """
    Implementation of the stochastic layer from
    [Stochastic Classifiers for Unsupervised Domain Adaptation](https://xiatian-zhu.github.io/papers/LuEtAl_CVPR2020.pdf).
    In ```train()``` mode, it uses weights and biases
    sampled from learned normal distributions.
    In ```eval()``` mode, the learned means are used.
    """

    def __init__(self, in_features: int, out_features: int, device=None, dtype=None):
        """
        Arguments:
            in_features: size of each input sample
            out_features: size of each output sample
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight_mean = nn.Parameter(
            torch.empty(in_features, out_features, **factory_kwargs)
        )
        self.weight_sigma = nn.Parameter(
            torch.empty(in_features, out_features, **factory_kwargs)
        )
        self.bias_mean = nn.Parameter(torch.empty(out_features, **factory_kwargs))
        self.bias_sigma = nn.Parameter(torch.empty(out_features, **factory_kwargs))
        self.reset_parameters()

    def reset_parameters(self):
        reset_parameters_helper(self.weight_mean, self.bias_mean)
        reset_parameters_helper(self.weight_sigma, self.bias_sigma)

    def random_sample(self, mean, sigma):
        # reparameterization trick: mean + sigma * eps with eps ~ N(0, I)
        # gives a sample from N(mean, sigma^2) that is differentiable
        # with respect to both mean and sigma
        eps = torch.randn(*sigma.shape, device=sigma.device, dtype=sigma.dtype)
        return mean + (sigma * eps)

    def forward(self, x):
        """"""
        if self.training:
            # sample fresh weights and biases on every forward pass
            weight = self.random_sample(self.weight_mean, self.weight_sigma)
            bias = self.random_sample(self.bias_mean, self.bias_sigma)
        else:
            # deterministic: use the learned means
            weight = self.weight_mean
            bias = self.bias_mean

        # weight is stored as (in_features, out_features),
        # so the input can be matrix-multiplied directly
        return torch.matmul(x, weight) + bias

    def extra_repr(self):
        """"""
        return c_f.extra_repr(self, ["in_features", "out_features"])
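
The sampling in random_sample is the standard reparameterization trick: the randomness enters only through eps, so gradients of a downstream loss flow back into the mean and sigma parameters, and the distributions themselves are learned by backpropagation. A minimal sketch of that gradient flow (the tensors here are illustrative, not part of the library):

import torch

mean = torch.zeros(4, requires_grad=True)
sigma = torch.ones(4, requires_grad=True)

eps = torch.randn(4)         # eps ~ N(0, I); no gradient needed
sample = mean + sigma * eps  # distributed as N(mean, sigma^2)

sample.sum().backward()
print(mean.grad)   # all ones:   d(sample)/d(mean)  = 1
print(sigma.grad)  # equals eps: d(sample)/d(sigma) = eps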

__init__(in_features, out_features, device=None, dtype=None)

Parameters:

Name          Type  Description                 Default
in_features   int   size of each input sample   required
out_features  int   size of each output sample  required