Skip to content

Commit 7dc5857

Browse files
committed
added sample weights to all estimators
added preprocessing for sparse X; reorganized the opt module; added sample weights to the opt module. Merge branch 'sample-weights'
2 parents 49f7579 + a3ad1bc commit 7dc5857

67 files changed

Lines changed: 1618 additions & 1466 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

docs/concave_penalty_two_stage_estimators.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@
139139
{
140140
"data": {
141141
"text/plain": [
142-
"array([ 0.00000000e+00, 0.00000000e+00, -1.36847349e-07, 0.00000000e+00,\n",
142+
"array([ 0.00000000e+00, 0.00000000e+00, -1.36847748e-07, 0.00000000e+00,\n",
143143
" 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,\n",
144144
" 0.00000000e+00, 0.00000000e+00])"
145145
]

docs/linear_regression_broad_overview.ipynb

Lines changed: 79 additions & 68 deletions
Large diffs are not rendered by default.

docs/linear_regression_deeper_look.ipynb

Lines changed: 31 additions & 31 deletions
Large diffs are not rendered by default.

docs/loss_plus_penalty_combo.ipynb

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
},
1919
{
2020
"cell_type": "code",
21-
"execution_count": 11,
21+
"execution_count": 9,
2222
"metadata": {},
2323
"outputs": [],
2424
"source": [
@@ -85,8 +85,8 @@
8585
"name": "stdout",
8686
"output_type": "stream",
8787
"text": [
88-
"CPU times: user 762 ms, sys: 11.5 ms, total: 773 ms\n",
89-
"Wall time: 776 ms\n"
88+
"CPU times: user 778 ms, sys: 13.4 ms, total: 791 ms\n",
89+
"Wall time: 889 ms\n"
9090
]
9191
}
9292
],
@@ -117,8 +117,8 @@
117117
"name": "stdout",
118118
"output_type": "stream",
119119
"text": [
120-
"CPU times: user 2.21 s, sys: 41.8 ms, total: 2.26 s\n",
121-
"Wall time: 2.26 s\n"
120+
"CPU times: user 2.3 s, sys: 63.7 ms, total: 2.37 s\n",
121+
"Wall time: 2.41 s\n"
122122
]
123123
}
124124
],
@@ -161,10 +161,10 @@
161161
"name": "stdout",
162162
"output_type": "stream",
163163
"text": [
164-
"Lasso L2 to truth 1.1222397223387948\n",
165-
"ENet L2 to truth 0.9661888417457567\n",
166-
"Adaptive Lasso L2 to truth 0.5178048099475633\n",
167-
"SCAD L2 to truth 0.8400704078733395\n"
164+
"Lasso L2 to truth 1.1211348334707714\n",
165+
"ENet L2 to truth 0.9630396203100647\n",
166+
"Adaptive Lasso L2 to truth 0.5147703900551543\n",
167+
"SCAD L2 to truth 0.8401140167810839\n"
168168
]
169169
}
170170
],
@@ -244,7 +244,7 @@
244244
},
245245
{
246246
"cell_type": "code",
247-
"execution_count": 10,
247+
"execution_count": 8,
248248
"metadata": {
249249
"scrolled": false
250250
},
@@ -253,15 +253,15 @@
253253
"name": "stdout",
254254
"output_type": "stream",
255255
"text": [
256-
"Entrywise lasso L2 to truth 0.911922407158712\n",
256+
"Entrywise lasso L2 to truth 0.9119237212284282\n",
257257
"\n",
258-
"Multi-task Lasso L2 to truth 0.8684933954494701\n",
259-
"Adaptive multi-task lasso L2 to truth 0.7141523201068535\n",
260-
"FCP multi-task with LLA L2 to truth 0.7502828771190455\n",
258+
"Multi-task Lasso L2 to truth 0.8684943342448098\n",
259+
"Adaptive multi-task lasso L2 to truth 0.7141545319969675\n",
260+
"FCP multi-task with LLA L2 to truth 0.7502835459738096\n",
261261
"\n",
262-
"Nuclear norm L2 to truth 0.709992352578054\n",
263-
"Adaptive nuclear norm L2 to truth 0.4736246352155898\n",
264-
"FCP nuclear nrom with LLA L2 to truth 0.5079065745103277\n"
262+
"Nuclear norm L2 to truth 0.7099944507829721\n",
263+
"Adaptive nuclear norm L2 to truth 0.47362674050743064\n",
264+
"FCP nuclear nrom with LLA L2 to truth 0.507907042256749\n"
265265
]
266266
}
267267
],

ya_glm/backends/andersoncd/glm_solver.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,12 @@ def solve_glm(X, y,
1313
loss_func='lin_reg',
1414
loss_kws={},
1515
fit_intercept=True,
16+
sample_weight=None,
1617

1718
lasso_pen=None,
1819
lasso_weights=None,
1920

21+
2022
# groups=None,
2123
# L1to2=False,
2224
# nuc=False,
@@ -31,6 +33,9 @@ def solve_glm(X, y,
3133
p0=10, verbose=0, tol=1e-4, prune=0,
3234
return_n_iter=False):
3335

36+
if sample_weight is not None:
37+
raise NotImplementedError("need to add")
38+
3439
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
3540
order='F', copy=False, accept_large_sparse=False)
3641
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,

ya_glm/backends/cvxpy/glm_solver.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ def solve_glm(X, y,
1414
loss_func='lin_reg',
1515
loss_kws={},
1616
fit_intercept=True,
17+
sample_weight=None,
1718
lasso_pen=None,
1819
lasso_weights=None,
1920
groups=None,
@@ -28,6 +29,9 @@ def solve_glm(X, y,
2829
solver=None,
2930
cp_kws={}):
3031

32+
if sample_weight is not None:
33+
raise NotImplementedError("need to add")
34+
3135
start_time = time()
3236
######################
3337
# objective function #

ya_glm/backends/fista/WL1SolverGlm.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ def solve(self, L1_weights, opt_init=None, opt_init_upv=None):
3232
loss_func=self.glm_loss,
3333

3434
loss_kws=self.loss_kws,
35+
sample_weight=self.sample_weight,
3536
fit_intercept=self.fit_intercept,
3637
lasso_pen=1,
3738
lasso_weights=L1_weights,

0 commit comments

Comments
 (0)