Module openpack_torch.models.imu

Source code:
from .deep_conv_lstm import DeepConvLSTM, DeepConvLSTMSelfAttn
from .unet import UNet

__all__ = ["DeepConvLSTM", "DeepConvLSTMSelfAttn", "UNet"]
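
A minimal usage sketch (assuming openpack_torch is installed; the class count 11 is a hypothetical example value). Every model in this module maps an IMU window of shape (B, CH, T, 1) to per-frame class scores of shape (B, N_CLASSES, T, 1):

import torch
from openpack_torch.models.imu import DeepConvLSTM

x = torch.randn(8, 6, 128, 1)                  # (B, CH, T, 1) dummy IMU window
model = DeepConvLSTM(in_ch=6, num_classes=11)  # 11 = hypothetical class count
y = model(x)
print(y.shape)                                 # torch.Size([8, 11, 128, 1])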

Sub-modules

openpack_torch.models.imu.deep_conv_lstm

Implementation of DeepConvLSTM. Reference: https://www.mdpi.com/1424-8220/16/1/115

openpack_torch.models.imu.unet

Implementation of U-Net for time-series data. Reference: http://arxiv.org/abs/1505.04597

Classes

class DeepConvLSTM (in_ch: int = 6, num_classes: int = None)

Implementation of DeepConvLSTM [Sensors 2016].

Note

https://www.mdpi.com/1424-8220/16/1/115 (Sensors, 2016)

Source code:
class DeepConvLSTM(nn.Module):
    """Imprementation of DeepConvLSTM [Sensors 2016].

    Note:
        https://www.mdpi.com/1424-8220/16/1/115 (Sensors, 2016)

    """

    def __init__(self, in_ch: int = 6, num_classes: int = None):
        super().__init__()

        # NOTE: The first block is input layer.

        # -- [L2-5] Convolutions --
        blocks = []
        for i in range(4):
            in_ch_ = in_ch if i == 0 else 64
            blocks.append(
                nn.Sequential(
                    nn.Conv2d(in_ch_, 64, kernel_size=(5, 1), padding=(2, 0)),
                    nn.BatchNorm2d(64),
                    nn.ReLU(),
                )
            )
        self.conv2to5 = nn.ModuleList(blocks)

        # -- [L6-7] LSTM --
        hidden_units = 128
        self.lstm6 = nn.LSTM(64, hidden_units, batch_first=True)
        self.lstm7 = nn.LSTM(hidden_units, hidden_units, batch_first=True)
        self.dropout6 = nn.Dropout(p=0.5)
        self.dropout7 = nn.Dropout(p=0.5)

        # -- [L8] Softmax Layer (Output Layer) --
        self.out8 = nn.Conv2d(
            hidden_units,
            num_classes,
            1,
            stride=1,
            padding=0,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape = (B, CH, T, 1)
        Returns:
            torch.Tensor: shape = (B, N_CLASSES, T, 1)
        """
        # -- Conv --
        for i in range(4):
            x = self.conv2to5[i](x)

        # -- LSTM --
        # Reshape: (B, CH, T, 1) -> (B, T, CH)
        x = x.squeeze(3).transpose(1, 2)

        x, _ = self.lstm6(x)
        x = self.dropout6(x)
        x, _ = self.lstm7(x)
        x = self.dropout7(x)

        # -- [L8] Softmax Layer (Output Layer) --
        # Reshape: (B, T, CH) -> (B, CH, T, 1)
        x = x.transpose(1, 2).unsqueeze(3)
        x = self.out8(x)
        return x
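
For reference, a shape trace through the blocks above (a sketch with a hypothetical class count; in eval mode the dropout layers are identity, so this matches forward()):

import torch
from openpack_torch.models.imu import DeepConvLSTM

model = DeepConvLSTM(in_ch=6, num_classes=11).eval()  # hypothetical class count
h = torch.randn(2, 6, 128, 1)           # (B, CH, T, 1)
with torch.no_grad():
    for blk in model.conv2to5:
        h = blk(h)                      # (B, 64, T, 1) after every conv block
    h = h.squeeze(3).transpose(1, 2)    # (B, T, 64)
    h, _ = model.lstm6(h)               # (B, T, 128)
    h, _ = model.lstm7(h)               # (B, T, 128)
    h = h.transpose(1, 2).unsqueeze(3)  # (B, 128, T, 1)
    h = model.out8(h)                   # (B, 11, T, 1)
print(h.shape)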

Ancestors

  • torch.nn.modules.module.Module

Methods

def forward(self, x: torch.Tensor) -> torch.Tensor

Args

x : torch.Tensor
shape = (B, CH, T, 1)

Returns

torch.Tensor
shape = (B, N_CLASSES, T, 1)
Source code:
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Args:
        x (torch.Tensor): shape = (B, CH, T, 1)
    Returns:
        torch.Tensor: shape = (B, N_CLASSES, T, 1)
    """
    # -- Conv --
    for i in range(4):
        x = self.conv2to5[i](x)

    # -- LSTM --
    # Reshape: (B, CH, T, 1) -> (B, T, CH)
    x = x.squeeze(3).transpose(1, 2)

    x, _ = self.lstm6(x)
    x = self.dropout6(x)
    x, _ = self.lstm7(x)
    x = self.dropout7(x)

    # -- [L8] Softmax Layer (Output Layer) --
    # Reshape: (B, T, CH) -> (B, CH, T, 1)
    x = x.transpose(1, 2).unsqueeze(3)
    x = self.out8(x)
    return x
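
Note that out8 emits raw scores; no softmax is applied inside the model. A sketch of per-frame training with cross-entropy, which applies log-softmax internally (class count and shapes are hypothetical example values):

import torch
import torch.nn.functional as F
from openpack_torch.models.imu import DeepConvLSTM

model = DeepConvLSTM(in_ch=6, num_classes=11)  # hypothetical class count
x = torch.randn(4, 6, 128, 1)                  # (B, CH, T, 1)
t = torch.randint(0, 11, (4, 128, 1))          # per-frame labels, (B, T, 1)

logits = model(x)                              # (B, 11, T, 1) raw class scores
loss = F.cross_entropy(logits, t)              # softmax folded into the loss
loss.backward()
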
class DeepConvLSTMSelfAttn (in_ch: int = 6, num_classes: int = None, cnn_filters=3, lstm_units=32, num_attn_heads: int = 1)

Implementation of a DeepConvLSTM with Self-Attention, as used in "Deep ConvLSTM with self-attention for human activity decoding using wearable sensors" (IEEE Sensors Journal, 2020).

Note

https://ieeexplore.ieee.org/document/9296308 (IEEE Sensors Journal, 2020)

Source code:
class DeepConvLSTMSelfAttn(nn.Module):
    """Imprementation of a DeepConvLSTM with Self-Attention used in ''Deep ConvLSTM with
    self-attention for human activity decoding using wearable sensors'' (Sensors 2020).

    Note:
        https://ieeexplore.ieee.org/document/9296308 (Sensors 2020)
    """

    def __init__(
        self,
        in_ch: int = 6,
        num_classes: int = None,
        cnn_filters=3,
        lstm_units=32,
        num_attn_heads: int = 1,
    ):
        super().__init__()

        # NOTE: The first block is input layer.

        # -- [1] Embedding Layer --
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, cnn_filters, kernel_size=1, padding=0),
            nn.BatchNorm2d(cnn_filters),
            nn.ReLU(),
        )

        # -- [2] LSTM Encoder --
        self.lstm = nn.LSTM(cnn_filters, lstm_units, batch_first=True)
        self.dropout = nn.Dropout(p=0.5)

        # -- [3] Self-Attention --
        self.attention = nn.MultiheadAttention(
            lstm_units,
            num_attn_heads,
            batch_first=True,
        )

        # -- [4] Softmax Layer (Output Layer) --
        self.out = nn.Conv2d(
            lstm_units,
            num_classes,
            1,
            stride=1,
            padding=0,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape = (B, CH, T, 1)
        Returns:
            torch.Tensor: shape = (B, N_CLASSES, T, 1)
        """
        # -- [1] Embedding Layer --
        x = self.conv(x)

        # -- [2] LSTM Encoder --
        # Reshape: (B, CH, T, 1) -> (B, T, CH)
        x = x.squeeze(3).transpose(1, 2)

        x, _ = self.lstm(x)
        x = self.dropout(x)

        # -- [3] Self-Attention --
        # Self-attention over the T time steps (query = key = value).
        x, w = self.attention(x.clone(), x.clone(), x.clone())

        # -- [4] Softmax Layer (Output Layer) --
        # Reshape: (B, T, CH) -> (B, CH, T, 1)
        x = x.transpose(1, 2).unsqueeze(3)
        x = self.out(x)
        return x
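
A minimal forward-pass sketch (the class count is a hypothetical example value). One constraint worth noting: nn.MultiheadAttention requires lstm_units to be divisible by num_attn_heads.

import torch
from openpack_torch.models.imu import DeepConvLSTMSelfAttn

model = DeepConvLSTMSelfAttn(
    in_ch=6,
    num_classes=11,      # hypothetical class count
    cnn_filters=3,
    lstm_units=32,
    num_attn_heads=1,    # must divide lstm_units
)
x = torch.randn(2, 6, 128, 1)  # (B, CH, T, 1)
y = model(x)                   # attention mixes features across all T steps
print(y.shape)                 # torch.Size([2, 11, 128, 1])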

Ancestors

  • torch.nn.modules.module.Module

Methods

def forward(self, x: torch.Tensor) -> torch.Tensor

Args

x : torch.Tensor
shape = (B, CH, T, 1)

Returns

torch.Tensor
shape = (B, N_CLASSES, T, 1)
Source code:
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Args:
        x (torch.Tensor): shape = (B, CH, T, 1)
    Returns:
        torch.Tensor: shape = (B, N_CLASSES, T, 1)
    """
    # -- [1] Embedding Layer --
    x = self.conv(x)

    # -- [2] LSTM Encoder --
    # Reshape: (B, CH, T, 1) -> (B, T, CH)
    x = x.squeeze(3).transpose(1, 2)

    x, _ = self.lstm(x)
    x = self.dropout(x)

    # -- [3] Self-Attention --
    # Self-attention over the T time steps (query = key = value).
    x, w = self.attention(x.clone(), x.clone(), x.clone())

    # -- [4] Softmax Layer (Output Layer) --
    # Reshape: (B, T, CH) -> (B, CH, T, 1)
    x = x.transpose(1, 2).unsqueeze(3)
    x = self.out(x)
    return x
class UNet (in_ch: int = 6, num_classes: int = None, ch_inc: int = 32, depth: int = 5)

Input must be in channel-first format (BCHW). This model uses 2D convolutional filters with kernel size (f x 1). See also the original U-Net paper at http://arxiv.org/abs/1505.04597

Note

The time axis should come in the 3rd dimension (i.e., H).

Args

in_ch : int
Number of input (sensor) channels.
num_classes : int
The number of classes to model.
ch_inc : int, optional
Number of channels produced by the input encoding layer and fed to UNetEncoder. (Default: 32)
depth : int
The number of blocks in the encoder/decoder.
Source code:
class UNet(nn.Module):
    """
    Input must take channel-first format (BCHW).
    This model use 2D convolutional filter with kernel size = (f x 1).
    See also original U-net paper at http://arxiv.org/abs/1505.04597
    Note:
        Time axis should come in the 3rd dimention (i.e., H).
    """

    def __init__(
        self,
        in_ch: int = 6,
        num_classes: int = None,
        ch_inc: int = 32,
        depth: int = 5,
    ):
        """
        Args:
            in_ch (int): -
            num_classes (int): The number of classes to model.
            ch_inc (int, optional):
                the number of input channels for UNetEncoder. (Default: 32)
            pools (tuple of int):
               list of kernel sizes for pooling operations.
            depth (int): the number of blocks for Encoder/Decoder.
        """
        super().__init__()

        # NOTE: Add input encoding layer (UNet)
        # Ref:
        # https://github.com/milesial/Pytorch-UNet/blob/master/unet/unet_model.py
        self.inc = nn.Sequential(
            nn.Conv2d(
                in_ch,
                ch_inc,
                kernel_size=(3, 1),
                stride=(1, 1),
                padding=(1, 0),
            ),
            nn.BatchNorm2d(ch_inc),
            nn.ReLU(),
        )
        self.encoder = UNetEncoder(ch_inc, depth=depth)
        self.decoder = UNetDecoder(ch_inc, depth=depth)
        self.dense_clf = nn.Conv2d(ch_inc, num_classes, 1, padding=0, stride=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.inc(x)
        (x, res) = self.encoder(x)
        x = self.decoder(x, res)
        x = self.dense_clf(x)
        return x
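
A minimal usage sketch (the class count is a hypothetical example value; the window length 256 assumes the encoder halves the time axis at each of its depth blocks, so T should be divisible by 2**depth):

import torch
from openpack_torch.models.imu import UNet

model = UNet(in_ch=6, num_classes=11, ch_inc=32, depth=5)  # hypothetical class count
x = torch.randn(2, 6, 256, 1)  # (B, CH, T, 1); 256 = 2**5 * 8
y = model(x)
print(y.shape)                 # expected: torch.Size([2, 11, 256, 1])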

Ancestors

  • torch.nn.modules.module.Module

Methods

def forward(self, x: torch.Tensor) -> torch.Tensor

Define the computation performed at every call.

Should be overridden by all subclasses.

Note

Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
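
In practice this means calling the instance rather than forward() directly (a sketch; the class count is a hypothetical example value):

import torch
from openpack_torch.models.imu import UNet

model = UNet(in_ch=6, num_classes=11)  # hypothetical class count
x = torch.randn(2, 6, 256, 1)

y = model(x)            # preferred: runs registered hooks, then forward()
# y = model.forward(x)  # works, but silently skips any registered hooks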

Source code:
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x = self.inc(x)
    (x, res) = self.encoder(x)
    x = self.decoder(x, res)
    x = self.dense_clf(x)
    return x