I am new to PyTorch and Ray. I was trying to tune my PyTorch Lightning model's hyperparameters with Ray Tune, but when I passed parameters with multiple candidate values (via tune.choice) in the config dictionary, I got this error:

TypeError: empty() received an invalid combination of arguments - got (tuple, dtype=NoneType, device=NoneType), but expected one of:
 * (tuple of ints size, *, tuple of names names, torch.memory_format memory_format, torch.dtype dtype, torch.layout layout, torch.device device, bool pin_memory, bool requires_grad)
 * (tuple of ints size, *, torch.memory_format memory_format, Tensor out, torch.dtype dtype, torch.layout layout, torch.device device, bool pin_memory, bool requires_grad)
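
If it helps, I can reproduce what looks like the same failure outside of Ray by passing a tuple where a layer constructor expects an int, which makes me suspect the raw search space (rather than a sampled value) is reaching the model somewhere. A minimal sketch of my guess (not my actual model code):

    import torch.nn as nn

    # nn.Linear expects an int for in_features; passing the candidate
    # values as a tuple makes torch.empty() receive a tuple and raise
    # the same TypeError as above.
    layer = nn.Linear((32, 64, 128), 10)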

My config dictionary looks like this:

self.config = {"cnn_fc_linear": tune.choice([32, 64, 128]),
                       "fcn_n_filters": tune.choice([64, 128, 256]),
                       "fcn_fc_linear": tune.choice([64, 128, 256]),
                       }
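
My understanding from the Ray Tune docs is that tune.run samples one concrete value per key and passes the resulting plain dict to the trainable, so inside train each entry should already be an int (illustrative values below, not actual output):

    # What I expect the sampled config to look like inside the trainable:
    config = {"cnn_fc_linear": 64, "fcn_n_filters": 128, "fcn_fc_linear": 256}
    in_features = config["cnn_fc_linear"]  # a plain int, safe to pass to a layer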

and my tune function is as follows:

    def tune_asha(self, tracking_uri: str, task_name: str, dataset_location: str,
                  random_seed: Optional[int], validation_strategy: str,
                  sample_rate: Optional[float], saturation_value: float,
                  preprocessed_dataset_cache_location: Optional[str],
                  window_size: Union[int, float],
                  window_augmentation_overlap: float,
                  selected_channels_file_path: str, model_name: str,
                  model_size: str, train_batch_size: int, eval_batch_size: int,
                  learning_rate: float, weight_decay: float,
                  gradient_clip_val: float, max_epochs: int,
                  val_check_interval: float, early_stopping_patience: int,
                  model_checkpoint_dir: str, device: str, config: dict,
                  gpus_per_trial=0):

        scheduler = ASHAScheduler(max_t=max_epochs, grace_period=1,
                                  reduction_factor=2)

        reporter = CLIReporter(
                parameter_columns=["cnn_fc_linear", "fcn_n_filters", "fcn_fc_linear"],
                metric_columns=["loss", "mean_accuracy", "training_iteration"])

        analysis = tune.run(
                tune.with_parameters(self.train,
                                     tracking_uri=tracking_uri, task_name=task_name,
                                     dataset_location=dataset_location, random_seed=random_seed,
                                     validation_strategy=validation_strategy, sample_rate=sample_rate,
                                     saturation_value=saturation_value,
                                     preprocessed_dataset_cache_location=preprocessed_dataset_cache_location,
                                     window_size=window_size, window_augmentation_overlap=window_augmentation_overlap,
                                     selected_channels_file_path=selected_channels_file_path,
                                     model_name=model_name, model_size=model_size,
                                     train_batch_size=train_batch_size, eval_batch_size=eval_batch_size,
                                     learning_rate=learning_rate,
                                     weight_decay=weight_decay, gradient_clip_val=gradient_clip_val,
                                     max_epochs=max_epochs,
                                     val_check_interval=val_check_interval, early_stopping_patience=early_stopping_patience,
                                     model_checkpoint_dir=model_checkpoint_dir, device=device),
                resources_per_trial={"cpu": 1, "gpu": gpus_per_trial},
                metric="loss", mode="min", config=config,
                # num_samples=num_samples
                scheduler=scheduler,
                progress_reporter=reporter, name="tune_mnist_asha")

        print("Best hyperparameters found were: ", analysis.best_config)

How can I fix this error?
