from tsai.data.basics import *
from tsai.learner import *
# model, layer and head utilities used below (XCM, conv_lin_nd_head, change_model_head, create_pool_plus_head)
from tsai.models.XCM import *
from tsai.models.layers import *
from tsai.models.utils import *
XCM
An Explainable Convolutional Neural Network for Multivariate Time Series Classification
This is an unofficial PyTorch implementation of XCM created by Ignacio Oguiza (oguiza@timeseriesAI.co).
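It is based on: Fauvel, K., Lin, T., Masson, V., Fromont, É., & Termier, A. (2021). XCM: An Explainable Convolutional Neural Network for Multivariate Time Series Classification. Mathematics, 9(23), 3137.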
XCM
XCM (c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1.0, flatten:bool=False, custom_head:callable=None, concat_pool:bool=False, fc_dropout:float=0.0, bn:bool=False, y_range:tuple=None, **kwargs)
Same as nn.Module, but no need for subclasses to call super().__init__
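A minimal sketch of direct instantiation (the shapes here are illustrative, not taken from this page, and the wildcard imports above are assumed):

# XCM expects input of shape (batch_size, c_in, seq_len)
xb = torch.rand(8, 24, 51)
model = XCM(c_in=24, c_out=6, seq_len=51)
test_eq(model(xb).shape, (8, 6))  # one logit per class

The rest of this page walks through a full example on the NATOPS dataset.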
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, TSCategorize()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
model = XCM(dls.vars, dls.c, dls.len)
learn = ts_learner(dls, model, metrics=accuracy)
xb, yb = dls.one_batch()

bs, c_in, seq_len = xb.shape
c_out = len(np.unique(yb.cpu().numpy()))

model = XCM(c_in, c_out, seq_len, fc_dropout=.5)
test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
model = XCM(c_in, c_out, seq_len, concat_pool=True)
test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
model = XCM(c_in, c_out, seq_len)
test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
model
XCM(
(conv2dblock): Sequential(
(0): Unsqueeze(dim=1)
(1): Conv2dSame(
(conv2d_same): Conv2d(1, 128, kernel_size=(1, 51), stride=(1, 1))
)
(2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): ReLU()
)
(conv2d1x1block): Sequential(
(0): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
(1): ReLU()
(2): Squeeze(dim=1)
)
(conv1dblock): Sequential(
(0): Conv1d(24, 128, kernel_size=(51,), stride=(1,), padding=(25,))
(1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
(conv1d1x1block): Sequential(
(0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))
(1): ReLU()
)
(concat): Concat(dim=1)
(conv1d): Sequential(
(0): Conv1d(25, 128, kernel_size=(51,), stride=(1,), padding=(25,))
(1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
(head): Sequential(
(0): GAP1d(
(gap): AdaptiveAvgPool1d(output_size=1)
(flatten): Reshape(bs)
)
(1): LinBnDrop(
(0): Linear(in_features=128, out_features=6, bias=True)
)
)
)
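The printout above shows XCM's two parallel branches: conv2dblock and conv2d1x1block apply 2D convolutions with kernel_size=(1, window) to extract features from each variable independently, while conv1dblock and conv1d1x1block apply 1D convolutions across all variables to extract temporal features. Their outputs are concatenated along the channel dimension (24 variable channels + 1 temporal channel = 25 here) and passed through a final 1D convolutional block before the classification head. The window length (51 here) is seq_len * window_perc, so window_perc controls how much of the sequence each kernel sees. These two branches are also what make the model explainable, via Grad-CAM maps over variables and over time: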
model.show_gradcam(xb, yb)
model.show_gradcam(xb[0], yb[0])
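Grad-CAM attributions are only meaningful once the model has been trained. With the learner built above you would typically fit first; a minimal sketch (the schedule is illustrative, not from this page):

learn.fit_one_cycle(25, lr_max=1e-3)  # illustrative number of epochs and learning rate
learn.model.show_gradcam(xb[0], yb[0])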
bs = 16
n_vars = 3
seq_len = 12
c_out = 10
xb = torch.rand(bs, n_vars, seq_len)
new_head = partial(conv_lin_nd_head, d=(5, 2))
net = XCM(n_vars, c_out, seq_len, custom_head=new_head)
print(net.to(xb.device)(xb).shape)
net.head
torch.Size([16, 5, 2, 10])
create_conv_lin_nd_head(
(0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))
(1): Linear(in_features=12, out_features=10, bias=True)
(2): Transpose(-1, -2)
(3): Reshape(bs, 5, 2, 10)
)
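conv_lin_nd_head is one of several ready-made heads in tsai. More generally, custom_head can be any callable returning a module; a minimal sketch (my_head is a hypothetical helper, not part of tsai, and the (nf, c_out, seq_len) calling convention is an assumption based on the conv_lin_nd_head example above):

# Hypothetical custom head: global average pooling followed by a linear layer.
def my_head(nf, c_out, seq_len):
    return nn.Sequential(GAP1d(1), nn.Linear(nf, c_out))

net = XCM(n_vars, c_out, seq_len, custom_head=my_head)
print(net.to(xb.device)(xb).shape)  # expected: torch.Size([16, 10])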
bs = 16
n_vars = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, n_vars, seq_len)
net = XCM(n_vars, c_out, seq_len)
change_model_head(net, create_pool_plus_head, concat_pool=False)
print(net.to(xb.device)(xb).shape)
net.head
torch.Size([16, 2])
Sequential(
(0): AdaptiveAvgPool1d(output_size=1)
(1): Reshape(bs)
(2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): Linear(in_features=128, out_features=512, bias=False)
(4): ReLU(inplace=True)
(5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(6): Linear(in_features=512, out_features=2, bias=False)
)
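As elsewhere on this page, a quick shape test confirms the replaced head produces one output per class:

test_eq(net.to(xb.device)(xb).shape, (bs, c_out))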