# neural-forecast
p
Hi Team, I see NBEATS supports explainability, giving a decomposition of trend and seasonality on prediction. There is a plot_interpretation function in the pytorch-forecasting implementation of NBEATS, and I am trying to find out whether the Nixtla implementation has something similar. Any leads would be really appreciated: https://pytorch-forecasting.readthedocs.io/en/latest/tutorials/ar.html
m
Absolutely! You can follow this tutorial. I hope this helps!
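For reference, a minimal sketch of what that workflow looks like in neuralforecast, using the `AirPassengersDF` toy dataset and illustrative hyperparameters (not necessarily the tutorial's exact values):

```python
# Sketch: fit an NBEATS model, then call decompose() to get each stack's
# partial forecast (the neuralforecast analogue of plot_interpretation).
from neuralforecast import NeuralForecast
from neuralforecast.models import NBEATS
from neuralforecast.tsdataset import TimeSeriesDataset
from neuralforecast.utils import AirPassengersDF

Y_df = AirPassengersDF  # long format: unique_id, ds, y

model = NBEATS(h=12, input_size=24,
               stack_types=['identity', 'trend', 'seasonality'],
               max_steps=100)
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=Y_df)

# decompose() predicts while keeping the per-stack components separate
dataset, *_ = TimeSeriesDataset.from_df(Y_df)
y_hat = nf.models[0].decompose(dataset=dataset)
```

Each component slice of `y_hat` is one stack's contribution (trend, seasonality, etc.), which can be plotted to reproduce the interpretation figures.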
p
Thanks a ton, will go through it.
Hi @Marco, I tried replicating the same steps and also tweaking the input_size, but I still get this error:
```
RuntimeError: maximum size for tensor at dimension 2 is 30 but size is 36
```
Error trace:
```
GPU available: True (cuda), used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
```
```
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[40], line 1
----> 1 y_hat = model.decompose(dataset=dataset)

File /opt/conda/lib/python3.10/site-packages/neuralforecast/common/_base_windows.py:728, in BaseWindows.decompose(self, dataset, step_size, random_seed, **data_module_kwargs)
    722 datamodule = TimeSeriesDataModule(
    723     dataset=dataset,
    724     valid_batch_size=self.valid_batch_size,
    725     **data_module_kwargs,
    726 )
    727 trainer = pl.Trainer(**self.trainer_kwargs)
--> 728 fcsts = trainer.predict(self, datamodule=datamodule)
    729 self.decompose_forecast = False  # Default decomposition back to false
    730 return torch.vstack(fcsts).numpy()

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py:864, in Trainer.predict(self, model, dataloaders, datamodule, return_predictions, ckpt_path)
    862 self.state.status = TrainerStatus.RUNNING
    863 self.predicting = True
--> 864 return call._call_and_handle_interrupt(
    865     self, self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path
    866 )

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/trainer/call.py:44, in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
     42     if trainer.strategy.launcher is not None:
     43         return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
---> 44     return trainer_fn(*args, **kwargs)
     46 except _TunerExitException:
     47     _call_teardown_hook(trainer)

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py:903, in Trainer._predict_impl(self, model, dataloaders, datamodule, return_predictions, ckpt_path)
    899 assert self.state.fn is not None
    900 ckpt_path = self._checkpoint_connector._select_ckpt_path(
    901     self.state.fn, ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
    902 )
--> 903 results = self._run(model, ckpt_path=ckpt_path)
    905 assert self.state.stopped
    906 self.predicting = False

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py:987, in Trainer._run(self, model, ckpt_path)
    982 self._signal_connector.register_signal_handlers()
    984 # ----------------------------
    985 # RUN THE TRAINER
    986 # ----------------------------
--> 987 results = self._run_stage()
    989 # ----------------------------
    990 # POST-Training CLEAN UP
    991 # ----------------------------
    992 log.debug(f"{self.__class__.__name__}: trainer tearing down")

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py:1028, in Trainer._run_stage(self)
   1026     return self._evaluation_loop.run()
   1027 if self.predicting:
-> 1028     return self.predict_loop.run()
   1029 if self.training:
   1030     with isolate_rng():

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/utilities.py:182, in _no_grad_context.<locals>._decorator(self, *args, **kwargs)
    180     context_manager = torch.no_grad
    181 with context_manager():
--> 182     return loop_run(self, *args, **kwargs)

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:124, in _PredictionLoop.run(self)
    122     self.batch_progress.is_last_batch = data_fetcher.done
    123     # run step hooks
--> 124     self._predict_step(batch, batch_idx, dataloader_idx, dataloader_iter)
    125 except StopIteration:
    126     # this needs to wrap the `*_step` call too (not just `next`) for `dataloader_iter` support
    127     break

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/prediction_loop.py:253, in _PredictionLoop._predict_step(self, batch, batch_idx, dataloader_idx, dataloader_iter)
    247 # configure step_kwargs
    248 step_args = (
    249     self._build_step_args_from_hook_kwargs(hook_kwargs, "predict_step")
    250     if not using_dataloader_iter
    251     else (dataloader_iter,)
    252 )
--> 253 predictions = call._call_strategy_hook(trainer, "predict_step", *step_args)
    254 if predictions is None:
    255     self._warning_cache.warn("predict returned None if it was on purpose, ignore this warning...")

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/trainer/call.py:309, in _call_strategy_hook(trainer, hook_name, *args, **kwargs)
    306     return None
    308 with trainer.profiler.profile(f"[Strategy]{trainer.strategy.__class__.__name__}.{hook_name}"):
--> 309     output = fn(*args, **kwargs)
    311 # restore current_fx when nested context
    312 pl_module._current_fx_name = prev_fx_name

File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/strategies/strategy.py:438, in Strategy.predict_step(self, *args, **kwargs)
    436 if self.model != self.lightning_module:
    437     return self._forward_redirection(self.model, self.lightning_module, "predict_step", *args, **kwargs)
--> 438 return self.lightning_module.predict_step(*args, **kwargs)

File /opt/conda/lib/python3.10/site-packages/neuralforecast/common/_base_windows.py:560, in BaseWindows.predict_step(self, batch, batch_idx)
    557 def predict_step(self, batch, batch_idx):
    558 
    559     # TODO: Hack to compute number of windows
--> 560     windows = self._create_windows(batch, step="predict")
    561     n_windows = len(windows["temporal"])
    562     y_idx = batch["y_idx"]

File /opt/conda/lib/python3.10/site-packages/neuralforecast/common/_base_windows.py:244, in BaseWindows._create_windows(self, batch, step, w_idxs)
    241     padder_right = nn.ConstantPad1d(padding=(0, self.h), value=0)
    242     temporal = padder_right(temporal)
--> 244 windows = temporal.unfold(
    245     dimension=-1, size=window_size, step=predict_step_size
    246 )
    248 # [batch, channels, windows, window_size] 0, 1, 2, 3
    249 # -> [batch * windows, window_size, channels] 0, 2, 3, 1
    250 windows_per_serie = windows.shape[2]

RuntimeError: maximum size for tensor at dimension 2 is 30 but size is 36
```
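For what it's worth, the trace points at the cause: `unfold` is asked for windows of `window_size` points (36 here, which in `BaseWindows` is `input_size + h`), but the series, even after being right-padded by `h`, only has 30 points. So every series in the panel needs at least `input_size` observations. A quick sanity check, assuming a long-format dataframe named `Y_df` (hypothetical name) with a `unique_id` column:

```python
# decompose()/predict() build windows of input_size + h points per padded
# series, so each series must be at least input_size rows long.
# Y_df is assumed to be the long-format input dataframe (unique_id, ds, y).
shortest = Y_df.groupby('unique_id').size().min()
print(shortest)  # choose input_size <= this value
```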
v
Hello @Marco, I have a ValueError. I tried setting freq=1 but I still get the same error.
m
What is the error when `freq=1`?
v
It's working now. I realized I had entered 1 as text (a string) instead of an integer.
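For anyone hitting the same ValueError: with integer-indexed series, `freq` has to be a Python int, not a string. A minimal illustration (hyperparameters here are placeholders):

```python
from neuralforecast import NeuralForecast
from neuralforecast.models import NBEATS

model = NBEATS(h=12, input_size=24)  # illustrative hyperparameters

# freq must be an int when the ds column is integer-indexed:
nf = NeuralForecast(models=[model], freq=1)      # works
# nf = NeuralForecast(models=[model], freq='1')  # '1' as text triggers the ValueError
```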