from tsai.basics import *
from tsai.data.all import *
TSPerceiver
This implementation is inspired by:
Jaegle, A., Gimeno, F., Brock, A., Zisserman, A., Vinyals, O., & Carreira, J. (2021).
Perceiver: General Perception with Iterative Attention. arXiv preprint arXiv:2103.03206.
Paper: https://arxiv.org/pdf/2103.03206.pdf
Official repo: Not available as of April, 2021.
TSPerceiver
TSPerceiver (c_in, c_out, seq_len, cat_szs=0, n_cont=0, n_latents=512, d_latent=128, d_context=None, n_layers=6, self_per_cross_attn=1, share_weights=True, cross_n_heads=1, self_n_heads=8, d_head=None, attn_dropout=0.0, fc_dropout=0.0, concat_pool=False)
Same as nn.Module, but no need for subclasses to call super().__init__
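For reference, the model can also be built directly from this signature without any dataloaders. The sketch below is purely illustrative (the channel count, number of classes, sequence length and latent sizes are made-up values, not taken from the example that follows); it simply constructs a small model and counts its parameters:

tiny_model = TSPerceiver(c_in=3, c_out=2, seq_len=100, n_latents=32, d_latent=64, n_layers=2)  # illustrative sizes only
print(sum(p.numel() for p in tiny_model.parameters()))  # total parameter count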
dsid = 'OliveOil'
X, y, splits = get_UCR_data(dsid, split_data=False)
ts_features_df = get_ts_features(X, y)
ts_features_df.shape
Feature Extraction: 100%|██████████████████████████████████████████| 30/30 [00:00<00:00, 189.16it/s]
(60, 11)
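Before building the tabular dataloaders it can help to look at the extracted features. This is just a quick inspection of the DataFrame returned above; the column slice shown (all but the last two columns) is what will be passed as continuous features below:

ts_features_df.head()        # one row per sample with the extracted features
ts_features_df.columns[:-2]  # the columns used as cont_names below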
# raw ts
tfms = [None, [Categorize()]]
batch_tfms = TSStandardize(by_sample=True)
ts_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)

# ts features
cat_names = None
cont_names = ts_features_df.columns[:-2]
y_names = 'target'
tab_dls = get_tabular_dls(ts_features_df, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits)

# mixed
mixed_dls = get_mixed_dls(ts_dls, tab_dls)
xb, yb = mixed_dls.one_batch()
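The way n_cont is set in the next cell (xb[1][1].shape[1]) suggests each mixed batch is a tuple holding the raw time series tensor plus a (categorical, continuous) tabular pair. A quick shape check under that assumption:

xb[0].shape     # raw time series batch: (batch size, n variables, sequence length)
xb[1][1].shape  # continuous tabular features: (batch size, n continuous features)
yb.shape        # targets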
model = TSPerceiver(ts_dls.vars, ts_dls.c, ts_dls.len, cat_szs=0,
                    # n_cont=0,
                    n_cont=xb[1][1].shape[1],
                    n_latents=128, d_latent=128, n_layers=3, self_per_cross_attn=1, share_weights=True,
                    cross_n_heads=16, self_n_heads=16, d_head=None, attn_dropout=0., fc_dropout=0.).to(device)
test_eq(model(xb).shape, (yb.shape[0], len(np.unique(y))))
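From here the model can be trained on the mixed dataloaders like any other fastai model. A minimal training sketch (the imports are only needed if Learner and accuracy are not already in the namespace via the wildcard imports above; the number of epochs and learning rate are illustrative):

from fastai.learner import Learner
from fastai.metrics import accuracy

learn = Learner(mixed_dls, model, metrics=accuracy)
learn.fit_one_cycle(10, lr_max=1e-3)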