# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', '..', 'notebook_format'))
from formats import load_style
load_style(plot_style=False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.stats as stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
%watermark -a 'Ethen' -d -t -v -p numpy,scipy,pandas,sklearn,matplotlib,seaborn
When launching new features to our product, we oftentimes leverage experiments, or so-called A/B tests, in order to understand and quantify their impact. Popular statistical methods such as the t-test focus on calculating average treatment effects. Not that there's anything wrong with that approach, it's just that because an average reduces an entire distribution into a single number, any heterogeneity in our distribution may go unnoticed. Keep in mind that in real world settings, negative experiences oftentimes stick in people's heads longer and stronger than positive ones, so a change that leaves the typical user untouched but makes the worst experiences worse is easy to miss. The goal of this notebook is to show how to leverage quantile regression to calculate quantile treatment effects, which offer a more fine-grained alternative to only estimating average treatment effects.
We will use the NYCflights13 data to conduct our experiments, which contains over 300,000 observations of flights departing NYC in 2013. We will focus on a single variable: the arrival delay of flights, measured in minutes.
# constant column names used across the notebook
arr_delay = 'arr_delay'
airline_name = 'name'
def read_data(path='flights.csv'):
    """Will try and download the data to local [path] if it doesn't exist."""
    if not os.path.exists(path):
        base_url = 'https://media.githubusercontent.com/media/WillKoehrsen/Data-Analysis/master'
        url = base_url + '/univariate_dist/data/formatted_flights.csv'
        df = pd.read_csv(url, usecols=[arr_delay, airline_name])
        df.to_csv(path, index=False)
    else:
        df = pd.read_csv(path, usecols=[arr_delay, airline_name])
    return df
df = read_data()
print(df.shape)
df.head()
To start with, we'll use a density plot to visualize the distribution of the arr_delay field. For those not familiar with it, think of it as a continuous version of a histogram (the plot below overlays a density plot on top of the histogram).
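For a bit of intuition on what the density curve is computing, here is a minimal sketch, not part of the original notebook, that evaluates a gaussian kernel density estimate with scipy directly: it places a gaussian bump on every observation and averages them.
# illustrative sketch: evaluate a gaussian kernel density estimate directly
from scipy.stats import gaussian_kde

kde = gaussian_kde(df[arr_delay])
grid = np.linspace(df[arr_delay].min(), df[arr_delay].max(), num=5)
kde(grid)  # estimated density at a handful of delay values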
# change default style figure and font size
plt.rcParams['figure.figsize'] = 10, 8
plt.rcParams['font.size'] = 12
sns.distplot(df[arr_delay], hist=True, kde=True,
             hist_kws={'edgecolor': 'black'},
             kde_kws={'linewidth': 4})
plt.show()
Now, let's say we would like to use this data to compare the arrival delay of two airlines, and once again we'll use our good old density plot to visualize and compare the two airlines' arrival delay distributions.
We're not affiliated with either of these airlines in any way, nor is this data guaranteed to be up to date with the status quo.
endeavor_airline = 'Endeavor Air Inc.'
us_airway_airline = 'US Airways Inc.'
for airline in [endeavor_airline, us_airway_airline]:
    subset = df[df[airline_name] == airline]
    sns.distplot(subset[arr_delay], hist=False, kde=True,
                 kde_kws={'shade': False, 'linewidth': 3}, label=airline)
plt.legend(prop={'size': 12}, title='Airline')
plt.title('Density Plot of Arrival Delays')
plt.xlabel('Delay (min)')
plt.ylabel('Density')
plt.show()
After visualizing the arrival delay of the two airlines, we can see that although both distributions seem to be centered around the same area, the tails on both sides tell a different story: one of the airlines shows a noticeably larger tendency to end up with long delays.
If we were to leverage the two-sample t-test to compare the means of the two samples, the statistical test would tell us that we can't reject the null hypothesis, i.e. there is no statistically significant difference between the arrival delay times of the two airlines.
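As a quick refresher (a standard formula, not spelled out in the original write-up), the pooled two-sample t-statistic that stats.ttest_ind computes with equal_var=True is

$$t = \frac{\bar{x}_1 - \bar{x}_2}{s_p \sqrt{\tfrac{1}{n_1} + \tfrac{1}{n_2}}}, \qquad s_p^2 = \frac{(n_1 - 1)\, s_1^2 + (n_2 - 1)\, s_2^2}{n_1 + n_2 - 2}$$

where $\bar{x}_i$, $s_i^2$ and $n_i$ are the sample mean, variance and size for airline $i$.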
airline1_delay = df.loc[df[airline_name] == endeavor_airline, arr_delay]
airline2_delay = df.loc[df[airline_name] == us_airway_airline, arr_delay]
result = stats.ttest_ind(airline1_delay, airline2_delay, equal_var=True)
result
We can also leverage a linear regression with a single binary variable to arrive at the same conclusion as the two-sample t-test above. The steps to do this are to convert our airline variable into a dummy variable and fit a linear regression using the dummy variable as the input feature and the arrival delay time as the response variable.
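Concretely, since get_dummies with drop_first=True keeps US Airways as the dummy (Endeavor becomes the baseline), the model we're about to fit can be sketched as

$$\text{arr\_delay}_i = \beta_0 + \beta_1 \, \mathbb{1}\{\text{airline}_i = \text{US Airways}\} + \epsilon_i$$

where $\beta_0$ is the mean delay of the baseline airline and $\beta_1$ is the difference in mean delay between the two airlines, which is exactly the quantity the two-sample t-test examines.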
mask = df[airline_name].isin([endeavor_airline, us_airway_airline])
df_airline_delay = df[mask].reset_index(drop=True)
y = df_airline_delay[arr_delay]
X = pd.get_dummies(df_airline_delay[airline_name], drop_first=True)
X.head()
We'll be using statsmodels to build the linear regression, as it gives R-like statistical output. For people coming from scikit-learn, note that in statsmodels the y variable comes first and that, by default, it doesn't automatically fit a constant/intercept, so we'll need to add it ourselves.
# ordinary least square
model = sm.OLS(y, sm.add_constant(X))
result = model.fit()
result.summary()
Notice that the numbers for the t-statistic and p-value match the two-sample t-test result above. The benefit of using a linear regression is that we can include many other features to see whether they are the reasons behind the arrival delay.
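As a quick illustration of that extension (purely hypothetical, since our trimmed csv only keeps the airline name and the arrival delay), adding controls is just a matter of appending columns to the design matrix. Here we append a random noise column as a stand-in for a real feature such as departure delay:
# purely illustrative: our data only has airline name and arrival delay,
# so a random noise column stands in for a real extra feature
rng = np.random.default_rng(123)
X_extended = X.copy()
X_extended['standin_feature'] = rng.normal(size=len(X_extended))

extended_result = sm.OLS(y, sm.add_constant(X_extended)).fit()
extended_result.summary()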
By looking at the average treatment effect, we would draw the conclusion that there is no statistical difference in the two airlines' arrival delay. However, based on the distributions of the two airlines' arrival delays, our hunch tells us that we're probably missing something. This is where estimating the quantile treatment effect provides additional insights not found by simply looking at average treatment effects. To do so, we change our model to a quantile regression and specify the quantile we are interested in.
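For context, a standard definition that the original notebook doesn't spell out: quantile regression at quantile $q$ estimates its coefficients by minimizing the check (pinball) loss rather than squared error,

$$\hat{\beta}(q) = \arg\min_{\beta} \sum_i \rho_q\!\left(y_i - x_i^\top \beta\right), \qquad \rho_q(u) = u\left(q - \mathbb{1}\{u < 0\}\right)$$

so the fitted coefficients describe the conditional $q$-th quantile of the arrival delay rather than its conditional mean.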
model = sm.QuantReg(y, sm.add_constant(X))
result = model.fit(q=0.9, kernel='gau')
result.summary()
At the 0.9 quantile, we were able to detect a statistically significant effect!
We can, of course, also compute this across multiple quantiles and plot the quantile treatment effect in a single figure to get a much more nuanced view of our experiment's treatment effect at different quantiles. This lets us detect extreme scenarios where a single two-sample t-test would fail to raise any concerns about the experiment.
def compute_quantile_treatment_effect(X, y, quantiles):
    coefs = []
    pvalues = []
    for q in quantiles:
        model = sm.QuantReg(y, sm.add_constant(X))
        result = model.fit(q=q, kernel='gau')
        coef = result.params[1]
        coefs.append(coef)
        pvalue = result.pvalues[1]
        pvalues.append(pvalue)

    coef_name = result.params.index[1]
    df_quantile_effect = pd.DataFrame({
        'quantile': quantiles,
        coef_name: coefs,
        'pvalue': pvalues
    })
    return df_quantile_effect
quantiles = np.arange(0.1, 1.0, 0.1)
df_quantile_effect = compute_quantile_treatment_effect(X, y, quantiles)
df_quantile_effect
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111)
ax.plot(df_quantile_effect['quantile'], df_quantile_effect['US Airways Inc.'])
ax.set_xlabel('quantiles')
ax.set_ylabel('effect size')
plt.show()
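As an optional touch-up (not in the original notebook), we can reuse the p-values we already collected to flag which quantiles show a statistically significant effect, say at the 0.05 level:
# mark quantiles whose effect is statistically significant at the 5% level
significant = df_quantile_effect['pvalue'] < 0.05

fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(df_quantile_effect['quantile'], df_quantile_effect[us_airway_airline])
ax.scatter(df_quantile_effect.loc[significant, 'quantile'],
           df_quantile_effect.loc[significant, us_airway_airline],
           color='red', zorder=3, label='p-value < 0.05')
ax.set_xlabel('quantiles')
ax.set_ylabel('effect size')
ax.legend()
plt.show()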