For now, we can only compute SHAP values for the models that use ecy as a feature, because the base margin is not taken into account by the shap package.
import pandas as pd
import numpy as np
import pickle
import xgboost as xgb
import os
import shap
import matplotlib.pyplot as pl
shap.initjs()
ModelsDir = '/home/kate/Research/Property/Models/'
DataDir = '/home/kate/Research/Property/Data/'
training_classification_dataset = pd.read_csv('%sproperty_wcf_class_training.csv'%DataDir, error_bad_lines=False, index_col=False)
training_poisson_dataset = pd.read_csv('%sproperty_wcf_training.csv'%DataDir, error_bad_lines=False, index_col=False)
Model_Classification = 'wc_class_f_ecy_XGB_0'
Model_Poisson = 'wc_Poisson_f_ecy_XGB_0'
prediction_column_classification = 'class_f_ecy_cv_xgb'
prediction_column_poisson = 'poisson_f_ecy_cv_xgb'
featureset = [
'cova_deductible',
'roofcd_encd',
'sqft',
'usagetype_encd',
'yearbuilt',
'cova_limit',
'water_risk_fre_3_blk',
'water_risk_3_blk',
'ecy'
]
#
X_classification=training_classification_dataset[featureset]
Dtrain_classification = xgb.DMatrix(X_classification.values)
#
X_poisson=training_poisson_dataset[featureset]
Dtrain_poisson = xgb.DMatrix(X_poisson.values)
# Load the pickled classification model
xgb_model_file = '%s%s.model'%(ModelsDir,Model_Classification)
with open(xgb_model_file, 'rb') as f:
    xgb_model_Classification = pickle.load(f)
#
# Load the pickled Poisson model
xgb_model_file = '%s%s.model'%(ModelsDir,Model_Poisson)
with open(xgb_model_file, 'rb') as f:
    xgb_model_Poisson = pickle.load(f)
explainer_Classification = shap.TreeExplainer(xgb_model_Classification)
explainer_Poisson = shap.TreeExplainer(xgb_model_Poisson)
SHAP values sum to the difference between the expected output of the model and the model's output for the current prediction. Note that for the Tree SHAP implementation the margin output of the model is explained, not the transformed output (such as a probability for logistic regression). This means that the units of the SHAP values for the classification model are log odds: large positive values mean a policy is likely to have a claim, while large negative values mean it is unlikely to.
xs = np.linspace(-10,4,100)
pl.xlabel("Log odds of claims")
pl.ylabel("Probability of a claim")
pl.title("How changes in log odds convert to probability of claims")
pl.plot(xs, 1/(1+np.exp(-xs)))
pl.show()
shap_values_Classification = explainer_Classification.shap_values(Dtrain_classification)
df_shap_values_Classification = pd.DataFrame(data=shap_values_Classification, columns=featureset)
df_shap_values_Classification['original_output_value'] = df_shap_values_Classification.sum(axis=1)
df_shap_values_Classification['expected_value'] = explainer_Classification.expected_value
df_shap_values_Classification['output_value'] = df_shap_values_Classification['expected_value'] + df_shap_values_Classification['original_output_value']
df_shap_values_Classification.to_csv('%sshap_values_Classification.csv'%DataDir,header=True,index=False)
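As a quick sanity check, the per-row SHAP values plus the expected value should reproduce the model's margin output. A minimal sketch, assuming the pickled object is a raw xgboost Booster (so predict accepts a DMatrix and output_margin=True returns the log odds):
# Additivity check: expected_value + sum(SHAP values) should match the raw margin
margin_pred_class = xgb_model_Classification.predict(Dtrain_classification, output_margin=True)
print(np.allclose(df_shap_values_Classification['output_value'], margin_pred_class, atol=1e-4))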
shap_values_Poisson = explainer_Poisson.shap_values(Dtrain_poisson)
df_shap_values_Poisson = pd.DataFrame(data=shap_values_Poisson, columns=featureset)
df_shap_values_Poisson['original_output_value'] = df_shap_values_Poisson.sum(axis=1)
df_shap_values_Poisson['expected_value'] = explainer_Poisson.expected_value
df_shap_values_Poisson['output_value'] = df_shap_values_Poisson['expected_value'] + df_shap_values_Poisson['original_output_value']
df_shap_values_Poisson.to_csv('%sshap_values_Poisson.csv'%DataDir,header=True,index=False)
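The same check applies to the Poisson model, with one difference: its explained margin is on the log scale, so exponentiating recovers the predicted claim frequency. A sketch under the same Booster assumption as above:
# Additivity check on the log scale, then back-transform to frequency
margin_pred_poisson = xgb_model_Poisson.predict(Dtrain_poisson, output_margin=True)
print(np.allclose(df_shap_values_Poisson['output_value'], margin_pred_poisson, atol=1e-4))
predicted_frequency = np.exp(df_shap_values_Poisson['output_value'])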
training_classification_dataset[training_classification_dataset[prediction_column_classification]==training_classification_dataset[prediction_column_classification].min()].head(1)
shap.force_plot(explainer_Classification.expected_value, shap_values_Classification[544369,:], X_classification.iloc[544369,:])
training_poisson_dataset[training_poisson_dataset[prediction_column_poisson]==training_poisson_dataset[prediction_column_poisson].min()].head(1)
shap.force_plot(explainer_Poisson.expected_value, shap_values_Poisson[653074,:], X_poisson.iloc[653074,:])
training_classification_dataset[training_classification_dataset[prediction_column_classification]==training_classification_dataset[prediction_column_classification].max()].head(1)
shap.force_plot(explainer_Classification.expected_value, shap_values_Classification[745738,:], X_classification.iloc[745738,:])
training_poisson_dataset[training_poisson_dataset[prediction_column_poisson]==training_poisson_dataset[prediction_column_poisson].max()].head(1)
shap.force_plot(explainer_Poisson.expected_value, shap_values_Poisson[408313,:], X_poisson.iloc[408313,:])
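The row indices above (544369, 653074, 745738, 408313) are hardcoded from the lookups; a minimal sketch of deriving them programmatically, assuming the training frames keep their default RangeIndex so label positions align with the SHAP value rows:
# Position of the lowest classification prediction (use idxmax for the highest)
idx_min_class = training_classification_dataset[prediction_column_classification].idxmin()
shap.force_plot(explainer_Classification.expected_value,
                shap_values_Classification[idx_min_class, :],
                X_classification.iloc[idx_min_class, :])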
A SHAP value for a feature of a specific prediction represents how much the model prediction changes when we observe that feature. In the summary plots below we plot all the SHAP values for a single feature on a row, where the x-axis is the SHAP value (which for the classification model is in units of log odds of a claim). By doing this for all features, we can see which features drive the model's predictions strongly and which affect them only slightly. Note that when points don't fit together on the line they pile up vertically to show density. Each dot is also colored by the value of that feature, from high to low.
shap.summary_plot(shap_values_Classification, X_classification)
shap.summary_plot(shap_values_Poisson, X_poisson)
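A common complement to the summary plots is a global ranking by mean absolute SHAP value per feature; a minimal sketch using the arrays already computed above:
# Mean |SHAP| per feature: a simple global importance measure
importance = pd.DataFrame({
    'feature': featureset,
    'mean_abs_shap_classification': np.abs(shap_values_Classification).mean(axis=0),
    'mean_abs_shap_poisson': np.abs(shap_values_Poisson).mean(axis=0),
}).sort_values('mean_abs_shap_classification', ascending=False)
print(importance)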
The XGBoost models we trained above are very complicated, but by plotting the SHAP value for a feature against the actual value of that feature for all policies we can see how changes in the feature's value affect the model's output. Note that these plots are very similar to standard partial dependence plots, but they provide the added advantage of displaying how much context matters for a feature (in other words, how much interaction terms matter). How much interaction terms affect the importance of a feature is captured by the vertical dispersion of the data points: the same feature value can shift the log odds of a claim by different amounts for different policies, because the other features of those policies change how much that feature matters. We color the data points with the other feature that best explains the interaction effect variance.
The y-axis in the plots below represents the SHAP value for that feature, so -4 means observing that feature lowers the log odds of a claim by 4, while a value of +2 means observing that feature raises the log odds of a claim by 2.
Note that these plots just explain how the XGBoost model works, not necessarily how reality works. Since the XGBoost model is trained on observational data, it is not necessarily a causal model; just because changing a factor makes the model's predicted claim probability go up does not always mean it will raise the actual claim frequency.
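When interaction_index is omitted, shap defaults to 'auto' and colors the points by the feature with the strongest approximate interaction; the calls below pass it explicitly to control the coloring. For example:
# Let shap pick the coloring feature automatically
shap.dependence_plot('ecy', shap_values_Classification, X_classification)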
shap.dependence_plot('ecy', shap_values_Classification, X_classification, interaction_index='yearbuilt')
shap.dependence_plot('ecy', shap_values_Poisson, X_poisson, interaction_index='yearbuilt')
shap.dependence_plot('ecy', shap_values_Classification, X_classification, interaction_index='usagetype_encd')
shap.dependence_plot('ecy', shap_values_Poisson, X_poisson, interaction_index='usagetype_encd')
usagetype_encd encoding:
when usagetype='PRIMARY' then 7
when usagetype='RENTAL' then 6
when usagetype='COC' then 5
when usagetype='VACANT' then 4
when usagetype='SEASONAL' then 3
when usagetype='SECONDARY' then 2
when usagetype='UNOCCUPIED' then 1
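For convenience, the same mapping as a Python dict (a hypothetical helper, not part of the modeling pipeline) for decoding usagetype_encd values when reading the plots:
# Hypothetical decoding helper for usagetype_encd (mirrors the legend above)
usagetype_decode = {7: 'PRIMARY', 6: 'RENTAL', 5: 'COC', 4: 'VACANT',
                    3: 'SEASONAL', 2: 'SECONDARY', 1: 'UNOCCUPIED'}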
shap.dependence_plot('cova_deductible', shap_values_Classification, X_classification, interaction_index='ecy')
shap.dependence_plot('cova_deductible', shap_values_Poisson, X_poisson, interaction_index='ecy')
shap.dependence_plot('cova_deductible', shap_values_Classification, X_classification, interaction_index='yearbuilt')
shap.dependence_plot('cova_deductible', shap_values_Poisson, X_poisson, interaction_index='yearbuilt')
shap.dependence_plot('cova_deductible', shap_values_Classification, X_classification, interaction_index='water_risk_3_blk')
shap.dependence_plot('cova_deductible', shap_values_Poisson, X_poisson, interaction_index='water_risk_3_blk')
shap.dependence_plot('cova_deductible', shap_values_Classification, X_classification, interaction_index='usagetype_encd')
shap.dependence_plot('cova_deductible', shap_values_Poisson, X_poisson, interaction_index='usagetype_encd')
usagetype_encd encoding:
when usagetype='PRIMARY' then 7
when usagetype='RENTAL' then 6
when usagetype='COC' then 5
when usagetype='VACANT' then 4
when usagetype='SEASONAL' then 3
when usagetype='SECONDARY' then 2
when usagetype='UNOCCUPIED' then 1
shap.dependence_plot('usagetype_encd', shap_values_Classification, X_classification, interaction_index='yearbuilt')
shap.dependence_plot('usagetype_encd', shap_values_Poisson, X_poisson, interaction_index='yearbuilt')
shap.dependence_plot('usagetype_encd', shap_values_Classification, X_classification, interaction_index='water_risk_3_blk')
shap.dependence_plot('usagetype_encd', shap_values_Poisson, X_poisson, interaction_index='water_risk_3_blk')
shap.dependence_plot('cova_limit', shap_values_Classification, X_classification, interaction_index='water_risk_3_blk')
shap.dependence_plot('cova_limit', shap_values_Poisson, X_poisson, interaction_index='water_risk_3_blk')
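The vertical dispersion in the dependence plots only hints at interactions; TreeExplainer can quantify them directly through SHAP interaction values. This is far more expensive than shap_values, so the sketch below runs on a random sample (an assumption for tractability, not part of the original pipeline):
# SHAP interaction values have shape (n_rows, n_features, n_features);
# computed on a 1,000-row sample because the full pass is costly.
sample = X_classification.sample(1000, random_state=0)
shap_interaction_values = explainer_Classification.shap_interaction_values(xgb.DMatrix(sample.values))
shap.summary_plot(shap_interaction_values, sample)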