
Commit fcd1b3c

[Jenkins] auto-formatting by clang-format version 10.0.0-4ubuntu1

1 parent 7a47143

4 files changed: 39 additions & 32 deletions


stan/math/mix/functor/laplace_marginal_density_estimator.hpp

Lines changed: 19 additions & 12 deletions
@@ -444,7 +444,8 @@ inline void llt_with_jitter(LLT& llt_B, B_t& B, double min_jitter = 1e-10,
     }
     if (llt_B.info() != Eigen::Success) {
       throw std::domain_error(
-          "laplace_marginal_density: Cholesky failed after adding jitter up to " + std::to_string(jitter_try));
+          "laplace_marginal_density: Cholesky failed after adding jitter up to "
+          + std::to_string(jitter_try));
     }
   }
 }
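
For context, the hunk above only reflows the error thrown by `llt_with_jitter`. A minimal sketch of the pattern that message belongs to, with the escalation policy and jitter cap assumed rather than taken from the library:

```cpp
#include <Eigen/Dense>

#include <stdexcept>
#include <string>

// Sketch: retry the LLT factorization with growing diagonal jitter until it
// succeeds or the cap is exceeded. The x10 escalation and the cap are
// assumptions, not the library's actual code.
inline Eigen::LLT<Eigen::MatrixXd> llt_with_jitter_sketch(
    Eigen::MatrixXd B, double min_jitter = 1e-10, double max_jitter = 1e-2) {
  Eigen::LLT<Eigen::MatrixXd> llt_B(B);
  double jitter_try = min_jitter;
  double last_applied = 0.0;
  while (llt_B.info() != Eigen::Success && jitter_try <= max_jitter) {
    B.diagonal().array() += jitter_try;  // push eigenvalues away from zero
    last_applied = jitter_try;
    llt_B.compute(B);
    jitter_try *= 10.0;
  }
  if (llt_B.info() != Eigen::Success) {
    throw std::domain_error(
        "laplace_marginal_density: Cholesky failed after adding jitter up to "
        + std::to_string(last_applied));
  }
  return llt_B;
}
```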
@@ -964,10 +965,11 @@ inline auto run_newton_loop(SolverPolicy& solver, NewtonStateT& state,
      * Stop when objective change is small, or when a rejected Wolfe step
      * fails to improve; finish_update then exits the Newton loop.
      */
-    bool objective_converged = std::abs(state.curr().obj() - state.prev().obj())
-                               < options.tolerance;
+    bool objective_converged
+        = std::abs(state.curr().obj() - state.prev().obj())
+          < options.tolerance;
     bool search_failed = (!state.wolfe_status.accept_
-        && state.curr().obj() <= state.prev().obj());
+                          && state.curr().obj() <= state.prev().obj());
     finish_update = objective_converged || search_failed;
   }
   if (finish_update) {
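
The stopping rule reformatted above combines two exits. A condensed restatement as a hypothetical helper, with field names following the diff:

```cpp
#include <cmath>

// Sketch of the Newton-loop stopping rule: stop on a small change in the
// objective, or when a rejected Wolfe step made no progress. The helper
// itself is hypothetical; the field accessors mirror the diff.
template <typename State, typename Opts>
inline bool newton_should_stop(const State& state, const Opts& options) {
  const bool objective_converged
      = std::abs(state.curr().obj() - state.prev().obj()) < options.tolerance;
  const bool search_failed = !state.wolfe_status.accept_
                             && state.curr().obj() <= state.prev().obj();
  return objective_converged || search_failed;
}
```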
@@ -1037,8 +1039,10 @@ inline decltype(auto) theta_init_impl(Eigen::Index theta_size, Opts&& options) {
 /**
  * @brief Create the update function for the line search, capturing necessary
  * references.
- * @tparam ObjFun Callable type for the objective function (accepting (a, theta))
- * @tparam ThetaGradFun Callable type for the theta gradient function (accepting theta)
+ * @tparam ObjFun Callable type for the objective function (accepting (a,
+ * theta))
+ * @tparam ThetaGradFun Callable type for the theta gradient function (accepting
+ * theta)
  * @tparam Covariance Type of the covariance matrix
  * @tparam Options Type of the options struct containing line search parameters
  * @param[in] obj_fun Objective function functor
@@ -1050,12 +1054,13 @@ inline decltype(auto) theta_init_impl(Eigen::Index theta_size, Opts&& options) {
  * bool update_fun(proposal, curr, prev, eval_in, p)
  * ```
  */
-template <typename ObjFun, typename ThetaGradFun, typename Covariance, typename Options>
+template <typename ObjFun, typename ThetaGradFun, typename Covariance,
+          typename Options>
 inline auto create_update_fun(ObjFun&& obj_fun, ThetaGradFun&& theta_grad_f,
-    Covariance&& covariance, Options&& options) {
+                              Covariance&& covariance, Options&& options) {
   auto update_step = [&covariance, &obj_fun, &theta_grad_f](
-      auto& proposal, auto&& /* curr */, auto&& prev,
-      auto& eval_in, auto&& p) {
+                         auto& proposal, auto&& /* curr */, auto&& prev,
+                         auto& eval_in, auto&& p) {
     try {
       proposal.a() = prev.a() + eval_in.alpha() * p;
       proposal.theta().noalias() = covariance * proposal.a();
@@ -1073,7 +1078,8 @@ inline auto create_update_fun(ObjFun&& obj_fun, ThetaGradFun&& theta_grad_f,
     eval.alpha() *= options.line_search.tau;
     return eval.alpha() > options.line_search.min_alpha;
   };
-  return [update_step_ = std::move(update_step), backoff_ = std::move(backoff)](
+  return
+      [update_step_ = std::move(update_step), backoff_ = std::move(backoff)](
          auto& proposal, auto&& curr, auto&& prev, auto& eval_in, auto&& p) {
        return internal::retry_evaluate(update_step_, proposal, curr, prev,
                                        eval_in, p, backoff_);
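
The returned lambda above hands `update_step_` and `backoff_` to `internal::retry_evaluate`. A minimal sketch of that retry-and-back-off contract, under the assumed semantics that a failed step shrinks `alpha` by `tau` until it falls below `min_alpha` (the real `retry_evaluate` signature differs):

```cpp
// Sketch only: attempt the step, and while it fails, back off geometrically.
// This mirrors the diff's backoff lambda, which does
//   eval.alpha() *= options.line_search.tau;
//   return eval.alpha() > options.line_search.min_alpha;
template <typename TryStep>
inline bool retry_with_backoff(TryStep&& try_step, double& alpha, double tau,
                               double min_alpha) {
  while (!try_step(alpha)) {  // e.g. non-finite objective => reject
    alpha *= tau;             // geometric back-off, tau in (0, 1)
    if (alpha <= min_alpha) {
      return false;           // step size exhausted, caller falls back
    }
  }
  return true;
}
```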
@@ -1153,7 +1159,8 @@ inline auto laplace_marginal_density_est(
   decltype(auto) theta_init = theta_init_impl<InitTheta>(theta_size, options);
   internal::NewtonState state(theta_size, obj_fun, theta_grad_f, theta_init);
   // Start with safe step size
-  auto update_fun = create_update_fun(std::move(obj_fun), std::move(theta_grad_f), covariance, options);
+  auto update_fun = create_update_fun(
+      std::move(obj_fun), std::move(theta_grad_f), covariance, options);
   Eigen::Index step_iter = 0;
   try {
     if (options.solver == 1) {

stan/math/mix/functor/wolfe_line_search.hpp

Lines changed: 17 additions & 17 deletions
@@ -157,9 +157,9 @@ namespace internal {
  */
 template <typename Scalar>
 [[nodiscard]] inline Scalar cubic_spline(Scalar x_left, Scalar f_left,
-        Scalar df_left, Scalar x_right,
-        Scalar f_right,
-        Scalar df_right) noexcept {
+                                         Scalar df_left, Scalar x_right,
+                                         Scalar f_right,
+                                         Scalar df_right) noexcept {
   const Scalar midpoint = (x_left + x_right) / Scalar(2);
 
   // Basic validation: ordering + finiteness.
@@ -284,8 +284,8 @@ template <typename Scalar>
 
 template <typename Eval, typename Options>
 inline auto cubic_spline(Eval&& low, Eval&& high, Options&& opt) {
-  auto alpha = cubic_spline(low.alpha(), low.obj(), low.dir(),
-      high.alpha(), high.obj(), high.dir());
+  auto alpha = cubic_spline(low.alpha(), low.obj(), low.dir(), high.alpha(),
+                            high.obj(), high.dir());
   const double width = high.alpha() - low.alpha();
   const double guard = 1e-3 * width;  // or make this an option
   alpha = std::clamp(alpha, low.alpha() + guard, high.alpha() - guard);
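
For reference, the core formula a `cubic_spline` like the one above typically evaluates before the guard clamp is the classic cubic-interpolation minimiser (Nocedal & Wright, eq. 3.59); the library's version additionally validates ordering and finiteness and falls back to the midpoint:

```cpp
#include <cmath>

// Cubic-interpolation minimiser from f and f' at both bracket endpoints
// (Nocedal & Wright, eq. 3.59). Shown as a reference formula only; it
// assumes finite inputs and x_left != x_right.
inline double cubic_minimizer(double x_left, double f_left, double df_left,
                              double x_right, double f_right,
                              double df_right) {
  const double d1
      = df_left + df_right - 3.0 * (f_left - f_right) / (x_left - x_right);
  const double d2 = std::copysign(
      std::sqrt(d1 * d1 - df_left * df_right), x_right - x_left);
  return x_right
         - (x_right - x_left) * (df_right + d2 - d1)
               / (df_right - df_left + 2.0 * d2);
}
```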
@@ -608,13 +608,13 @@ inline auto retry_evaluate(Update&& update, Proposal&& proposal, Curr&& curr,
  * The search maintains a left endpoint `low`, a right endpoint `high`,
  * and a fallback `best` (best Armijo-satisfying point seen so far).
  * Non-finite evaluations are contracted by factor `opt.tau` automatically.
- * 
+ *
  * ## Phase 1: Aggressive Expansion
  * The user-given initial step size is expanded by `opt.scale_up` until
  * either Wolfe conditions are violated or `opt.max_alpha` is reached.
  * If the first evaluation satisfies both conditions, the search expands
  * further ("zoom-up") to find the largest such step before accepting.
- * 
+ *
  * ## Phase 2: Expansion / Bracketing
  *
  * Starting from $\alpha_0 = \text{clamp}(\text{curr.alpha} \cdot
@@ -632,9 +632,9 @@ inline auto retry_evaluate(Update&& update, Proposal&& proposal, Curr&& curr,
  * | T | F | <= 0 | Bracket found, go to zoom |
  * | F | - | - | Bracket found, go to zoom |
  * ```
- * 
+ *
  * The goal of this phase is to find a valid bracket `[low, high]`,
- * ideally such that the low endpoint satisfies Armijo and has 
+ * ideally such that the low endpoint satisfies Armijo and has
  * a positive derivative, while the high endpoint has a negative
  * derivative. This gives us a shape like /\ to search through
  * in the zoom phase. If such a bracket cannot be found
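
The phases described above repeatedly test two conditions via `check_armijo` and `check_wolfe`. Hypothetical scalar forms of those checks (the library's versions operate on evaluation objects, not raw scalars):

```cpp
#include <cmath>

// f0 and df0 are the objective and directional derivative at alpha = 0,
// with 0 < c1 < c2 < 1. These are the standard definitions, shown for
// orientation only.
inline bool armijo_ok(double f_alpha, double f0, double df0, double alpha,
                      double c1) {
  return f_alpha <= f0 + c1 * alpha * df0;  // sufficient decrease
}

inline bool strong_wolfe_ok(double df_alpha, double df0, double c2) {
  return std::abs(df_alpha) <= c2 * std::abs(df0);  // curvature condition
}
```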
@@ -790,10 +790,10 @@ inline WolfeStatus wolfe_line_search(Info& wolfe_info, UpdateFun&& update_fun,
     return wolfe_check;
   }
   if (check_armijo(high, prev, opt)) {
-    if (check_wolfe(high, prev, opt)) { // [1]
+    if (check_wolfe(high, prev, opt)) {  // [1]
       curr.update(scratch, high);
       return WolfeStatus{WolfeReturn::Wolfe, total_updates, num_backtracks,
-          true};
+                         true};
     }
     if (best.obj() < high.obj()) {
       best = high;
@@ -887,7 +887,7 @@ inline WolfeStatus wolfe_line_search(Info& wolfe_info, UpdateFun&& update_fun,
                        num_backtracks, false};
   }
   if (check_armijo(mid, prev, opt)) {
-    if (check_wolfe(mid, prev, opt)) { // [1]
+    if (check_wolfe(mid, prev, opt)) {  // [1]
       curr.update(scratch, mid);
       return WolfeStatus{WolfeReturn::Wolfe, total_updates, num_backtracks,
                          true};
@@ -897,16 +897,16 @@ inline WolfeStatus wolfe_line_search(Info& wolfe_info, UpdateFun&& update_fun,
       best = mid;
     }
     if (mid.obj() > low.obj()) {
-      if (mid.dir() > 0) { // [2]
+      if (mid.dir() > 0) {  // [2]
         low = mid;
-      } else { // [3]
+      } else {  // [3]
         high = mid;
       }
     }
     // [4]
-      high = mid;
-    } else {
-      // [5]
+    high = mid;
+  } else {
+    // [5]
     high = mid;
   }
   // Convergence/guard-rail checks (uses prev/grad_tol/obj_tol etc.)
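
For comparison with the bracket bookkeeping in this hunk, here is the textbook zoom phase (Nocedal & Wright, Algorithm 3.6); the library's variant additionally tracks a `best` fallback and contracts on non-finite evaluations:

```cpp
#include <cmath>
#include <functional>

// Textbook zoom: phi/dphi are the 1-D objective and its derivative along
// the search direction, with dphi(0) < 0 for a descent direction. This is
// the standard algorithm, not the library's implementation.
inline double zoom_textbook(const std::function<double(double)>& phi,
                            const std::function<double(double)>& dphi,
                            double a_lo, double a_hi, double c1, double c2,
                            int max_iter = 50) {
  const double phi0 = phi(0.0);
  const double dphi0 = dphi(0.0);
  for (int i = 0; i < max_iter; ++i) {
    const double a_j = 0.5 * (a_lo + a_hi);  // bisection; cubic also works
    const double phi_j = phi(a_j);
    if (phi_j > phi0 + c1 * a_j * dphi0 || phi_j >= phi(a_lo)) {
      a_hi = a_j;                  // Armijo fails: shrink from the right
    } else {
      const double dphi_j = dphi(a_j);
      if (std::abs(dphi_j) <= -c2 * dphi0) {
        return a_j;                // strong Wolfe satisfied
      }
      if (dphi_j * (a_hi - a_lo) >= 0) {
        a_hi = a_lo;               // derivative sign flips the bracket
      }
      a_lo = a_j;
    }
  }
  return a_lo;                     // best Armijo point found
}
```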

test/unit/math/laplace/laplace_marginal_bernoulli_logit_lpmf_test.cpp

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ TEST_P(laplace_marginal_bernoulli_logit_lpmf, phi_dim500) {
   const auto [solver_num, hessian_block_size, max_steps_line_search]
       = GetParam();
   LAPLACE_SKIP_IF_INVALID_TEST_COMBO(hessian_block_size, dim_theta);
-  //LAPLACE_SKIP_ZERO_STEPS(max_steps_line_search);
+  // LAPLACE_SKIP_ZERO_STEPS(max_steps_line_search);
 
   auto x1 = stan::test::laplace::x1;
   auto x2 = stan::test::laplace::x2;

test/unit/math/laplace/wolfe_line_search_test.cpp

Lines changed: 2 additions & 2 deletions
@@ -798,8 +798,8 @@ TEST(CubicOrBisect, ReturnsInteriorMaximiser) {
 // Checks that the chooser falls back to the midpoint on non-finite data.
 TEST(CubicOrBisect, FallsBackToMidpointOnNonfinite) {
   using stan::math::internal::cubic_spline;
-  double alpha = cubic_spline(
-      0.0, std::numeric_limits<double>::quiet_NaN(), 1.0, 1.0, -1.0, -0.5);
+  double alpha = cubic_spline(0.0, std::numeric_limits<double>::quiet_NaN(),
+                              1.0, 1.0, -1.0, -0.5);
   EXPECT_DOUBLE_EQ(alpha, 0.5);
 }
 
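The test above pins down the fallback behaviour. A hypothetical chooser consistent with it, reusing the `cubic_minimizer` sketch from earlier for the finite case:

```cpp
#include <cmath>

// Sketch: bisect to the bracket midpoint whenever any input is non-finite,
// otherwise take the cubic step (cubic_minimizer is the reference formula
// sketched above; the library's cubic_spline bundles both behaviours).
inline double cubic_or_bisect_sketch(double x_left, double f_left,
                                     double df_left, double x_right,
                                     double f_right, double df_right) {
  const double midpoint = 0.5 * (x_left + x_right);
  const bool finite = std::isfinite(f_left) && std::isfinite(f_right)
                      && std::isfinite(df_left) && std::isfinite(df_right);
  if (!finite) {
    return midpoint;  // the test expects 0.5 for [0, 1] with a NaN input
  }
  const double alpha = cubic_minimizer(x_left, f_left, df_left, x_right,
                                       f_right, df_right);
  return std::isfinite(alpha) ? alpha : midpoint;
}
```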
