|
## Right now we hardcode it to use `ForwardDiff`
## Out-of-place variant: approximate the sparsity pattern with `ForwardDiff` by
## superimposing the absolute values of dense Jacobians evaluated at `ntrials`
## random points — any entry that is ever nonzero is kept as structural.
function (alg::ApproximateJacobianSparsity)(ad::AbstractSparseADType, f::F, x; fx = nothing,
        kwargs...) where {F}
    if !(ad isa AutoSparseForwardDiff)
        @warn "$(ad) support for approximate jacobian not implemented. Using ForwardDiff instead." maxlog=1
    end
    @unpack ntrials, rng = alg
    fx = fx === nothing ? f(x) : fx
    cfg = ForwardDiff.JacobianConfig(f, x)
    # Accumulator for |J| over all trials, a scratch Jacobian reused each trial,
    # and a buffer for the random evaluation point.
    J_sum = fill!(similar(fx, length(fx), length(x)), 0)
    J_trial = similar(J_sum)
    x_rand = similar(x)
    for _ in 1:ntrials
        randn!(rng, x_rand)
        ForwardDiff.jacobian!(J_trial, f, x_rand, cfg)
        J_sum .+= abs.(J_trial)
    end
    # Delegate to the prototype-based detector using the accumulated pattern.
    return (JacPrototypeSparsityDetection(; jac_prototype = sparse(J_sum), alg.alg))(ad, f,
        x; fx, kwargs...)
end
| 54 | + |
## In-place variant (`f(fx, x)` mutates `fx`): approximate the sparsity pattern
## with `ForwardDiff` by superimposing |J| over `ntrials` random points.
function (alg::ApproximateJacobianSparsity)(ad::AbstractSparseADType, f::F, fx, x;
        kwargs...) where {F}
    if !(ad isa AutoSparseForwardDiff)
        @warn "$(ad) support for approximate jacobian not implemented. Using ForwardDiff instead." maxlog=1
    end
    @unpack ntrials, rng = alg
    cfg = ForwardDiff.JacobianConfig(f, fx, x)
    # Accumulated |J|, per-trial scratch, and random evaluation point buffer.
    J = fill!(similar(fx, length(fx), length(x)), 0)
    J_cache = similar(J)
    x_ = similar(x)
    for _ in 1:ntrials
        randn!(rng, x_)
        ForwardDiff.jacobian!(J_cache, f, fx, x_, cfg)
        @. J += abs(J_cache)
    end
    # BUG FIX: delegate via the in-place `(ad, f, fx, x; kwargs...)` method of the
    # prototype detector. The previous `(ad, f, x; fx, kwargs...)` call selected
    # the out-of-place method while `f` is an in-place function (compare the
    # correct in-place delegation in the FiniteDiff sibling below).
    return (JacPrototypeSparsityDetection(; jac_prototype = sparse(J), alg.alg))(ad, f, fx,
        x; kwargs...)
end
| 73 | + |
## Out-of-place `AutoSparseFiniteDiff` variant: superimpose finite-difference
## Jacobians at `ntrials` random points, thresholded by `ε` so FD noise is not
## mistaken for structural nonzeros.
function (alg::ApproximateJacobianSparsity)(ad::AutoSparseFiniteDiff, f::F, x; fx = nothing,
        kwargs...) where {F}
    @unpack ntrials, rng = alg
    fx = fx === nothing ? f(x) : fx
    cache = FiniteDiff.JacobianCache(x, fx)
    J = fill!(similar(fx, length(fx), length(x)), 0)
    x_ = similar(x)
    # Short-circuit instead of `ifelse` so `eps(eltype(x))` is only evaluated
    # when no explicit epsilon was given (it throws for non-float eltypes).
    ε = alg.epsilon === nothing ? eps(eltype(x)) * 100 : alg.epsilon
    for _ in 1:ntrials
        randn!(rng, x_)
        # BUG FIX: evaluate the Jacobian at the random point `x_`, not at `x`;
        # otherwise every trial computes the identical Jacobian and the random
        # sampling contributes nothing.
        J_cache = FiniteDiff.finite_difference_jacobian(f, x_, cache)
        @. J += (abs(J_cache) ≥ ε) # hedge against numerical issues
    end
    return (JacPrototypeSparsityDetection(; jac_prototype = sparse(J), alg.alg))(ad, f, x;
        fx, kwargs...)
end
49 | 90 |
|
## In-place `AutoSparseFiniteDiff` variant (`f!(fx, x)` mutates `fx`): same
## superposition strategy using `FiniteDiff.finite_difference_jacobian!`.
function (alg::ApproximateJacobianSparsity)(ad::AutoSparseFiniteDiff, f!::F, fx, x;
        kwargs...) where {F}
    @unpack ntrials, rng = alg
    cache = FiniteDiff.JacobianCache(x, fx)
    # Accumulated pattern, per-trial scratch Jacobian, and random input buffer.
    pattern = fill!(similar(fx, length(fx), length(x)), 0)
    J_trial = similar(pattern)
    x_rand = similar(x)
    ε = ifelse(alg.epsilon === nothing, eps(eltype(x)) * 100, alg.epsilon)
    for _ in 1:ntrials
        randn!(rng, x_rand)
        FiniteDiff.finite_difference_jacobian!(J_trial, f!, x_rand, cache)
        # Threshold tiny finite-difference noise so it is not recorded as a
        # structural nonzero.
        pattern .+= abs.(J_trial) .≥ ε
    end
    return (JacPrototypeSparsityDetection(; jac_prototype = sparse(pattern), alg.alg))(ad,
        f!, fx, x; kwargs...)
end
0 commit comments