@@ -66,19 +66,19 @@ from ya_glm.toy_data import sample_sparse_multinomial, sample_sparse_lin_reg
6666X, y = sample_sparse_multinomial(n_samples = 100 , n_features = 10 , n_classes = 3 )[0 :2 ]
6767
6868# programmatically generate any loss + penalty combination
69- Est, EstCV = get_pen_glm(loss_func = ' multinomial' ,
69+ Est, EstCV = get_pen_glm(loss_func = ' multinomial' , # 'lin_reg', 'poisson', ...
7070 penalty = ' lasso' # 'enet', 'adpt_lasso', 'adpt_enet', 'fcp_lla'
7171 )
7272
7373
74- # fit using the sklean API you know and love
74+ # fit using the sklearn API you know and love!
7575Est(multi_task = True ).fit(X, y)
7676# Est().fit(X, y) # entrywise Lasso
7777# Est(nuc=True).fit(X, y) # nuclear norm
7878
7979# tune the lasso penalty with cross-validation
80- # we automatically compute the tuning sequence
81- # for any loss + penalty combination (the concave ones included !)
80+ # we automatically generate the tuning sequence
81+ # for any loss + penalty combination (including concave ones!)
8282EstCV(cv_select_rule = ' 1se' , # here we select the penalty parameter with the 1se rule
8383 cv_n_jobs = - 1 # parallelization over CV folds with joblib
8484 ).fit(X, y)
@@ -87,10 +87,10 @@ EstCV(cv_select_rule='1se', # here we select the penalty parameter with the 1se
8787# Let's try a concave penalty such as the adaptive Lasso
8888# or a concave penalty fit with the LLA algorithm
8989Est_concave, EstCV_concave = get_pen_glm(loss_func = ' multinomial' ,
90- penalty = ' fcp_lla ' # 'adpt_lasso '
90+ penalty = ' adpt_lasso ' # 'fcp_lla '
9191 )
9292
93- # concave penalties require an initilizer which is set via the init argument
93+ # concave penalties require an initializer which is set via the ' init' argument
9494# by default we initialize with a LassoCV
9595est = Est_concave(init = ' default' , multi_task = True ).fit(X, y)
9696
@@ -100,7 +100,7 @@ est = Est_concave(init=init)
100100est_cv = EstCV_concave(estimator = est)
101101
102102
103- # Here we an Elastic Net version of the Adaptive Lasso
103+ # Here we use an Elastic Net version of the Adaptive Group Lasso
104104# with user-specified groups for a linear regression example
105105Est, EstCV = get_pen_glm(loss_func = ' lin_reg' , penalty = ' adpt_enet' )
106106X, y = sample_sparse_lin_reg(n_samples = 100 , n_features = 10 , n_nonzero = 5 )[0 :2 ]
@@ -110,16 +110,16 @@ est = Est(groups=groups)
110110EstCV(estimator = est).fit(X, y)
111111
112112
113- # we provide a penalized qunatile regression solve based on
114- # Linear Programming for Lasso penalties or Quadratic Programming for Ridge type penalties
115- from ya_glm.backends.quantile_lp.glm_solver import solve_glm
113+ # we provide a penalized quantile regression solver based on Linear Programming for
114+ # Lasso penalties or Quadratic Programming for Ridge type penalties
116115
117- # Quantile regression with your favorite optimzation algorithm
116+ # Quantile regression with your favorite optimization algorithm
118117# you can easily provide your own optimization algorithm to be the backend solver
118+ from ya_glm.backends.quantile_lp.glm_solver import solve_glm # Linear Programming formulation of quantile regression
119+
119120Est, EstCV = get_pen_glm(loss_func = ' quantile' ,
120121 penalty = ' adpt_lasso' ,
121- backend = {' solve_glm' : solve_glm # solves a single penalize GLM problem
122- # 'solve_glm_path': None # path algorithm
122+ backend = {' solve_glm' : solve_glm
123123 }
124124 )
125125
0 commit comments