To improve your model's performance, you need to tune its hyperparameters, i.e. the parameters that significantly impact the model's output.
To tune them, run cross-validation over a grid of candidate values and then select the best-performing combination (see the Prophet diagnostics docs for details).
import itertools
import numpy as np
import pandas as pd
from prophet.diagnostics import performance_metrics, cross_validation
from prophet import Prophet
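# NOTE (assumption): this snippet presumes a prepared history `df` (a DataFrame
# with 'ds' and 'y' columns) and a set of cross-validation cutoff dates already
# exist; the values below are illustrative placeholders only.
# df = pd.read_csv('example_data.csv')  # hypothetical input file
cutoffs = pd.to_datetime(['2021-02-15', '2021-08-15', '2022-02-15'])  # example cutoff dates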
param_grid = {
    'changepoint_prior_scale': [0.001, 0.01, 0.1, 0.5],
    'seasonality_prior_scale': [0.01, 0.1, 1.0, 10.0],
}
# Generate all combinations of parameters
all_params = [dict(zip(param_grid.keys(), v)) for v in itertools.product(*param_grid.values())]
rmses = []  # Store the RMSE for each set of params here
# Use cross validation to evaluate all parameters
for params in all_params:
    m = Prophet(**params).fit(df)  # Fit a model with the given params
    df_cv = cross_validation(m, cutoffs=cutoffs, horizon='30 days', parallel="processes")
    df_p = performance_metrics(df_cv, rolling_window=1)
    rmses.append(df_p['rmse'].values[0])
# Find the best parameters
tuning_results = pd.DataFrame(all_params)
tuning_results['rmse'] = rmses
best_params = all_params[np.argmin(rmses)]
print(best_params)
better_model = Prophet(**best_params)
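With the best parameters selected, refit the tuned model on the full history and generate a forecast. A minimal sketch, assuming the same df as above and a 30-day horizon:
better_model.fit(df)  # Refit the tuned model on the full history
future = better_model.make_future_dataframe(periods=30)  # Extend 30 days beyond the history
forecast = better_model.predict(future)  # Forecast with the tuned model
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())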