diff --git a/examples/plot_knockoff_aggregation.py b/examples/plot_knockoff_aggregation.py
index c56121d1..a690480c 100644
--- a/examples/plot_knockoff_aggregation.py
+++ b/examples/plot_knockoff_aggregation.py
@@ -1,6 +1,6 @@
 """
 Knockoff aggregation on simulated data
-=============================
+======================================
 
 In this example, we show an example of variable selection using
 model-X Knockoffs introduced by :footcite:t:`Candes_2018`. A notable
diff --git a/examples/plot_variable_importance_classif.py b/examples/plot_variable_importance_classif.py
index 14718e01..bf5091c8 100644
--- a/examples/plot_variable_importance_classif.py
+++ b/examples/plot_variable_importance_classif.py
@@ -20,7 +20,7 @@
 
 #############################################################################
 # Imports needed
-# ------------------------------
+# --------------
 
 import matplotlib.lines as mlines
 import matplotlib.pyplot as plt
@@ -37,7 +37,7 @@
 
 #############################################################################
 # Generate the data
-# ------------------------------
+# -----------------
 # We generate the data using a multivariate normal distribution with a Toeplitz
 # correlation matrix. The target variable is generated using a non-linear function
 # of the features. To make the problem more intuitive, we generate a non-linear
@@ -81,7 +81,7 @@
 
 #############################################################################
 # Visualize the data
-# ------------------------------
+# ------------------
 
 fig, axes = plt.subplots(
     1,
@@ -115,7 +115,7 @@
 
 #############################################################################
 # Variable importance inference
-# ------------------------------
+# -----------------------------
 # We use two different Support Vector Machine models, one with a linear kernel and
 # one with a polynomial kernel of degree 2, well specified to capture the non-linear
 # relationship between the features and the target variable. We then use the CPI and
@@ -208,7 +208,7 @@
 
 #############################################################################
 # Compute the p-values for the variable importance
-# ------------------------------
+# ------------------------------------------------
 
 pval_arr = np.zeros((n_features, 3))
 for j in range(n_features):
@@ -218,7 +218,7 @@
 
 #############################################################################
 # Visualize the variable importance
-# ------------------------------
+# ---------------------------------
 # Here we plot the variable importance and highlight the features that are considered
 # important, with a p-value lower than 0.05, using a diamond marker. We also highlight
 # the true important features, used to generate the target variable, with a star marker.
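
For reference, the rule these hunks apply is reStructuredText's requirement that a section underline be at least as long as its title (docutils otherwise emits a "Title underline too short." warning when Sphinx-Gallery renders the script); the patch normalizes every underline to exactly the title length. A minimal sketch of how such mismatches can be caught programmatically, assuming each gallery title comment is immediately followed by its dash or equals underline (the helper name and the final call are illustrative, not part of the patch):

import re
from pathlib import Path

def check_rst_underlines(path):
    # Flag '# Title' comment lines whose '# ----' underline on the next
    # line is shorter than the title, which makes docutils warn.
    lines = Path(path).read_text().splitlines()
    for i, line in enumerate(lines[:-1]):
        title = re.fullmatch(r"# (\w.*)", line)
        rule = re.fullmatch(r"# (-+|=+)", lines[i + 1])
        if title and rule and len(rule.group(1)) < len(title.group(1)):
            print(f"{path}:{i + 1}: underline too short for {title.group(1)!r}")

check_rst_underlines("examples/plot_variable_importance_classif.py")

On the pre-patch files, only the three underlines shorter than their titles would actually warn (the knockoff title and the two longest section titles); the remaining hunks are stylistic, trimming 30-dash underlines down to match shorter titles.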