Module openpack_torch.models.keypoint

Source code
from .graph import get_adjacency_matrix
from .stgcn import STGCN4Seg

__all__ = ["STGCN4Seg", "get_adjacency_matrix"]

Sub-modules

openpack_torch.models.keypoint.graph
  • Ref.1: …
openpack_torch.models.keypoint.stgcn

Ref: …

Functions

def get_adjacency_matrix(layout: str = 'MSCOCO', hop_size: int = 2) ‑> numpy.ndarray

Returns adjacency matrix.

Args

layout : str, optional
skeleton layout. {MSCOCO, NTU-RGBD}. Defaults to "MSCOCO".
hop_size : int, optional
maximum distance of connection. Defaults to 2.

Raises

ValueError
if layout is not one of the supported layouts ("MSCOCO", "NTU-RGBD").

Returns

np.ndarray
adjacency matrix
Source code
def get_adjacency_matrix(
        layout: str = "MSCOCO",
        hop_size: int = 2) -> np.ndarray:
    """Returns adjacency matrix.

    Args:
        layout (str, optional): skeleton layout. {MSCOCO, NTU-RGBD}. Defaults to "MSCOCO".
        hop_size (int, optional): maximum distance of connection. Defaults to 2.

    Raises:
        ValueError: if ``layout`` is not one of the supported layouts.

    Returns:
        np.ndarray: adjacency matrix
    """
    if layout.upper() == "NTU-RGBD":
        graph = Graph(hop_size, NUM_NODES_NTU_RGBD, NTU_RGBD_SKELETON_LAYOUT)
    elif layout.upper() == "MSCOCO":
        graph = Graph(hop_size, NUM_NODES_MSCOCO, MSCOCO_SKELETON_LAYOUT)
    else:
        raise ValueError(f"unknown layout, [layout = {layout}]")
    return graph.A
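A minimal usage sketch. Only the call signature shown above is assumed; the exact shape of the returned array depends on the Graph implementation in openpack_torch.models.keypoint.graph.

from openpack_torch.models.keypoint import get_adjacency_matrix

# MSCOCO skeleton, joints connected up to 2 hops apart.
A = get_adjacency_matrix(layout="MSCOCO", hop_size=2)
print(type(A), A.shape)  # numpy.ndarray; the last two axes index the keypoints

# Layout names are matched case-insensitively; anything other than
# MSCOCO / NTU-RGBD raises ValueError.
try:
    get_adjacency_matrix(layout="OPENPOSE")
except ValueError as err:
    print(err)  # unknown layout, [layout = OPENPOSE]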

Classes

class STGCN4Seg (in_channels: int = None, num_classes: int = None, Ks: int = None, Kt: int = None, A: numpy.ndarray = None)

Implementation of ST-GCN for the segmentation task.

Initialize internal Module state, shared by both nn.Module and ScriptModule.

Source code
class STGCN4Seg(nn.Module):
    """Implementation of ST-GCN for segmentation task.

    """

    def __init__(
            self,
            in_channels: int = None,
            num_classes: int = None,
            Ks: int = None,
            Kt: int = None,
            A: np.ndarray = None,
    ):
        super().__init__()
        A = torch.tensor(A, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)
        A_size = A.size()
        num_vertex = A.size(1)

        # Batch Normalization
        self.bn = nn.BatchNorm1d(in_channels * A_size[1])

        # STConvBlocks
        self.stgc1 = STConvBlock(
            in_channels,
            32,
            Ks=Ks,
            Kt=Kt,
            num_vertex=num_vertex)
        self.stgc2 = STConvBlock(32, 32, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc3 = STConvBlock(32, 32, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc4 = STConvBlock(32, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)

        self.stgc5 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc6 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc7 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc8 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)

        self.stgc9 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc10 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc11 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)
        self.stgc12 = STConvBlock(64, 64, Ks=Ks, Kt=Kt, num_vertex=num_vertex)

        # Prediction
        self.fc = nn.Conv2d(64, num_classes, kernel_size=(1, num_vertex))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): shape=(BATCH, IN_CH, FRAMES, VERTEX)
        Returns:
            torch.Tensor: per-frame class logits, shape=(BATCH, NUM_CLASSES, FRAMES, 1).
        """
        # Batch Normalization
        N, C, T, V = x.size()  # batch, channel, frame, node
        x = x.permute(0, 3, 1, 2).contiguous().view(N, V * C, T)
        x = self.bn(x)
        x = x.view(N, V, C, T).permute(0, 2, 3, 1).contiguous()

        # STGC_blocks
        x = self.stgc1(x, self.A)
        x = self.stgc2(x, self.A)
        x = self.stgc3(x, self.A)
        x = self.stgc4(x, self.A)

        x = self.stgc5(x, self.A)
        x = self.stgc6(x, self.A)
        x = self.stgc7(x, self.A)
        x = self.stgc8(x, self.A)

        x = self.stgc9(x, self.A)
        x = self.stgc10(x, self.A)
        x = self.stgc11(x, self.A)
        x = self.stgc12(x, self.A)

        # Prediction
        x = self.fc(x)
        return x
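A construction-and-forward sketch. The hyperparameter values below (2-D keypoint coordinates as input channels, 11 classes, Ks=3, Kt=9, 100 frames) are illustrative assumptions, not the values used by OpenPack; take the real ones from the OpenPack configs and openpack_torch.models.keypoint.stgcn.

import torch

from openpack_torch.models.keypoint import STGCN4Seg, get_adjacency_matrix

A = get_adjacency_matrix(layout="MSCOCO", hop_size=2)
num_vertex = A.shape[-1]

# Hypothetical hyperparameters for illustration only.
model = STGCN4Seg(in_channels=2, num_classes=11, Ks=3, Kt=9, A=A)
model.eval()

# Dummy batch: (BATCH, IN_CH, FRAMES, VERTEX)
x = torch.randn(8, 2, 100, num_vertex)
with torch.no_grad():
    y = model(x)

# The final Conv2d collapses the vertex axis, so y holds per-frame class
# logits: (BATCH, NUM_CLASSES, FRAMES, 1), assuming the ST-Conv blocks
# preserve the temporal length.
print(y.shape)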

Ancestors

  • torch.nn.modules.module.Module

Methods

def forward(self, x: torch.Tensor) ‑> torch.Tensor

Args

x : torch.Tensor
shape=(BATCH, IN_CH, FRAMES, VERTEX)

Returns

torch.Tensor
per-frame class logits with shape=(BATCH, NUM_CLASSES, FRAMES, 1).
Source code
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Args:
        x (torch.Tensor): shape=(BATCH, IN_CH, FRAMES, VERTEX)
    Returns:
        torch.Tensor: per-frame class logits, shape=(BATCH, NUM_CLASSES, FRAMES, 1).
    """
    # Batch Normalization
    N, C, T, V = x.size()  # batch, channel, frame, node
    x = x.permute(0, 3, 1, 2).contiguous().view(N, V * C, T)
    x = self.bn(x)
    x = x.view(N, V, C, T).permute(0, 2, 3, 1).contiguous()

    # STGC_blocks
    x = self.stgc1(x, self.A)
    x = self.stgc2(x, self.A)
    x = self.stgc3(x, self.A)
    x = self.stgc4(x, self.A)

    x = self.stgc5(x, self.A)
    x = self.stgc6(x, self.A)
    x = self.stgc7(x, self.A)
    x = self.stgc8(x, self.A)

    x = self.stgc9(x, self.A)
    x = self.stgc10(x, self.A)
    x = self.stgc11(x, self.A)
    x = self.stgc12(x, self.A)

    # Prediction
    x = self.fc(x)
    return x
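A small standalone sketch of the reshaping done before self.bn: each (vertex, channel) pair becomes one feature of a BatchNorm1d, normalized over the batch and frame axes, and the second permute/view restores the (N, C, T, V) layout. The sizes below are illustrative only.

import torch
from torch import nn

N, C, T, V = 8, 2, 100, 17  # batch, channel, frame, node (illustrative sizes)
bn = nn.BatchNorm1d(C * V)

x = torch.randn(N, C, T, V)

# (N, C, T, V) -> (N, V, C, T) -> (N, V * C, T): one BatchNorm1d feature
# per (vertex, channel) pair, normalized over batch and frames.
z = x.permute(0, 3, 1, 2).contiguous().view(N, V * C, T)
z = bn(z)

# Undo the flattening to get back to (N, C, T, V).
z = z.view(N, V, C, T).permute(0, 2, 3, 1).contiguous()
print(z.shape)  # torch.Size([8, 2, 100, 17])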