Module openpack_toolkit.codalab
Source code
from . import operation_segmentation
from .operation_segmentation import eval_operation_segmentation, eval_operation_segmentation_wrapper
__all__ = [
    "operation_segmentation",
    "eval_operation_segmentation",
    "eval_operation_segmentation_wrapper",
]
Sub-modules
openpack_toolkit.codalab.operation_segmentation
-
Evaluation code for the Operation Semantic Segmentation Task …
Functions
def eval_operation_segmentation(t_id: numpy.ndarray = None, y_id: numpy.ndarray = None, classes: Tuple[Tuple[int, str], ...] = None, ignore_class_id: int = None, mode: str = 'final') -> pandas.core.frame.DataFrame
-
Compute metrics (i.e., precision, recall, f1, support) for the given sequence.
Args
t_id : np.ndarray
- ground-truth activity ID at each unixtime, shape=(T,)
y_id : np.ndarray
- predicted activity ID at each unixtime, shape=(T,)
classes : Tuple
- class definition; pairs of class ID and name.
ignore_class_id : int, optional
- class ID excluded from evaluation. If given, samples of this class are dropped and the class is removed from the class definition before scoring.
mode : str
- If "final", only the averaged scores (macro avg. and weighted avg.) are calculated. Otherwise, per-class scores are calculated in addition to the averages.
Returns
pd.DataFrame
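A minimal usage sketch (the three classes and the toy ID sequences below are illustrative, using a subset of the OpenPack operation class IDs):

import numpy as np
from openpack_toolkit.codalab import eval_operation_segmentation

# Class definition: pairs of (class ID, class name).
classes = (
    (100, "Picking"),
    (200, "Relocate Item Label"),
    (300, "Assemble Box"),
)

t_id = np.array([100, 100, 200, 200, 300, 300])  # ground truth, shape=(T,)
y_id = np.array([100, 200, 200, 200, 300, 300])  # predictions, shape=(T,)

# mode=None also returns per-class rows; mode="final" keeps only the averages.
df = eval_operation_segmentation(t_id, y_id, classes=classes, mode=None)
print(df)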
Source code
def eval_operation_segmentation(
    t_id: np.ndarray = None,
    y_id: np.ndarray = None,
    classes: Tuple[Tuple[int, str], ...] = None,
    ignore_class_id: int = None,
    mode: str = "final",
) -> pd.DataFrame:
    """Compute metrics (i.e., precision, recall, f1, support) for the given sequence.

    Args:
        t_id (np.ndarray): ground-truth activity ID at each unixtime, shape=(T,)
        y_id (np.ndarray): predicted activity ID at each unixtime, shape=(T,)
        classes (Tuple): class definition; pairs of class ID and name.
        ignore_class_id (int, optional): class ID excluded from evaluation.
        mode (str): If "final", only the averaged scores (macro avg. and
            weighted avg.) are calculated. Otherwise, per-class scores are
            calculated in addition to the averages.
    Returns:
        pd.DataFrame
    """
    assert t_id.ndim == 1
    assert y_id.ndim == 1
    verify_class_ids(y_id, classes)

    if ignore_class_id is not None:
        t_id, y_id = drop_ignore_class(t_id, y_id, ignore_class_id)
        classes = tuple([t for t in classes if t[0] != ignore_class_id])

    df_scores = [
        calc_avg_metrics(t_id, y_id, classes, average="macro"),
        calc_avg_metrics(t_id, y_id, classes, average="weighted"),
    ]
    if mode != "final":
        df_scores.append(
            calc_class_metrics(t_id, y_id, classes)
        )
    df_scores = pd.concat(
        df_scores, axis=0, ignore_index=True).set_index("name")
    return df_scores
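calc_avg_metrics and calc_class_metrics are internal helpers of the toolkit; the macro/weighted distinction they implement is presumably the standard one from scikit-learn. A short illustration of that distinction (a sketch for intuition, not the toolkit's actual implementation):

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

t_id = np.array([100, 100, 200, 200, 200, 300])
y_id = np.array([100, 200, 200, 200, 300, 300])

# "macro" averages the per-class scores with equal weight;
# "weighted" weights each class by its support (number of true samples).
for average in ("macro", "weighted"):
    p, r, f1, _ = precision_recall_fscore_support(
        t_id, y_id, labels=[100, 200, 300], average=average, zero_division=0)
    print(f"{average}: precision={p:.3f} recall={r:.3f} f1={f1:.3f}")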
def eval_operation_segmentation_wrapper(
    cfg: omegaconf.dictconfig.DictConfig,
    outputs: Dict[str, Dict[str, numpy.ndarray]],
    act_set: ActSet = ActSet(classes=(
        Label(id=100, name='Picking', version='v3.0.0', is_ignore=False, category=None, event=None),
        Label(id=200, name='Relocate Item Label', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=300, name='Assemble Box', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=400, name='Insert Items', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=500, name='Close Box', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=600, name='Attach Box Label', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=700, name='Scan Label', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=800, name='Attach Shipping Label', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=900, name='Put on Back Table', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=1000, name='Fill out Order', version='v3.2.2', is_ignore=False, category=None, event=None),
        Label(id=8100, name='Null', version='v3.2.2', is_ignore=True, category=None, event=None),
    )),
    exclude_ignore_class=True,
) -> pandas.core.frame.DataFrame
-
Compute evaluation metrics from model outputs (predicted probability).
Args
cfg : DictConfig
- config dict.
outputs : Dict[str, Dict[str, np.ndarray]]
- dict that contains t_idx and y for each sequence. t_idx is a 2d array of target class indices with shape=(BATCH_SIZE, WINDOW). y is a 3d array of prediction probabilities with shape=(BATCH_SIZE, NUM_CLASSES, WINDOW).
act_set : ActSet, optional
- class definition.
exclude_ignore_class : bool
- If True, ignore classes are excluded. (default: True)
Returns
pd.DataFrame
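For reference, a minimal sketch of how the outputs argument might be assembled from batched model predictions. The sequence key "U0102-S0100", the array sizes, and the random tensors are hypothetical, and cfg is assumed to be a valid OpenPack DictConfig loaded elsewhere:

import numpy as np

BATCH_SIZE, NUM_CLASSES, WINDOW = 32, 11, 1800  # hypothetical sizes

outputs = {
    "U0102-S0100": {  # hypothetical sequence key
        # target class indices, shape=(BATCH_SIZE, WINDOW)
        "t_idx": np.random.randint(0, NUM_CLASSES, size=(BATCH_SIZE, WINDOW)),
        # predicted class probabilities, shape=(BATCH_SIZE, NUM_CLASSES, WINDOW)
        "y": np.random.rand(BATCH_SIZE, NUM_CLASSES, WINDOW).astype(np.float32),
    },
}
# df_scores = eval_operation_segmentation_wrapper(cfg, outputs)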
Source code
def eval_operation_segmentation_wrapper(
    cfg: DictConfig,
    outputs: Dict[str, Dict[str, np.ndarray]],
    act_set: ActSet = ActSet(OPENPACK_OPERATIONS),
    exclude_ignore_class=True,
) -> pd.DataFrame:
    """Compute evaluation metrics from model outputs (predicted probability).

    Args:
        cfg (DictConfig): config dict.
        outputs (Dict[str, Dict[str, np.ndarray]]): dict that contains t_idx
            and y for each sequence. t_idx is a 2d array of target class
            indices with shape=(BATCH_SIZE, WINDOW). y is a 3d array of
            prediction probabilities with shape=(BATCH_SIZE, NUM_CLASSES, WINDOW).
        act_set (ActSet, optional): class definition.
        exclude_ignore_class (bool): If True, ignore classes are excluded.
            (default: True)
    Returns:
        pd.DataFrame
    """
    submission = construct_submission_dict(
        outputs, act_set, include_ground_truth=True, cfg=cfg)
    classes = act_set.to_tuple()

    ignore_class_id = act_set.get_ignore_class_id()
    if isinstance(ignore_class_id, tuple):
        raise NotImplementedError()

    # Evaluate
    df_scores = []
    t_id_concat, y_id_concat = [], []
    for key, d in submission.items():
        t_id = d["ground_truth"]
        y_id = d["prediction"]

        t_id_concat.append(t_id.copy())
        y_id_concat.append(y_id.copy())

        df_tmp = eval_operation_segmentation(
            t_id, y_id, classes=classes,
            ignore_class_id=ignore_class_id, mode=None)
        df_tmp["key"] = key
        df_scores.append(df_tmp.reset_index(drop=False))

    # Overall Score
    df_tmp = eval_operation_segmentation(
        np.concatenate(t_id_concat, axis=0),
        np.concatenate(y_id_concat, axis=0),
        classes=classes,
        ignore_class_id=ignore_class_id,
        mode=None,
    )
    df_tmp["key"] = "all"
    df_scores.append(df_tmp.reset_index(drop=False))

    df_scores = pd.concat(df_scores, axis=0, ignore_index=True)
    return df_scores
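As the source above shows, the returned DataFrame contains one block of rows per sequence, tagged via the key column, plus an overall block with key == "all" that is computed over the concatenation of all sequences. Since the wrapper calls eval_operation_segmentation with mode=None, each block includes the per-class scores as well as the macro and weighted averages.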