Implemented algorithms
----------------------

The core concept in ``adaptive`` is that of a *learner*. A *learner*
samples a function at the best places in its parameter space to get
maximum “information” about the function. As it evaluates the function
at more and more points in the parameter space, it gets a better idea of
where the best places are to sample next.

Of course, what qualifies as the “best places” will depend on your
application domain! ``adaptive`` makes some reasonable default choices,
but the details of the adaptive sampling are completely customizable.
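
Every learner exposes the same ask/tell interface, so the sampling loop
always has the same shape. Below is a minimal sketch (the ``x**3`` test
function and the loss goal of ``0.01`` are arbitrary choices for
illustration):

.. code:: python

    import adaptive

    def f(x):
        return x**3  # any function you want to sample

    learner = adaptive.Learner1D(f, bounds=(-1, 1))

    # The loss starts at infinity and shrinks as data comes in; ask()
    # proposes the next points and tell() feeds the results back.
    while learner.loss() > 0.01:
        points, _ = learner.ask(1)
        for x in points:
            learner.tell(x, f(x))

In practice you rarely write this loop yourself; a runner (see below)
drives it for you.
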

The following learners are implemented:

- `~adaptive.Learner1D`, for 1D functions ``f: ℝ → ℝ^N``,
- `~adaptive.Learner2D`, for 2D functions ``f: ℝ^2 → ℝ^N``,
- `~adaptive.LearnerND`, for ND functions ``f: ℝ^N → ℝ^M``,
- `~adaptive.AverageLearner`, for random variables where you want to
  average the result over many evaluations,
- `~adaptive.AverageLearner1D`, for stochastic 1D functions where you want to
  estimate the mean value of the function at each point,
- `~adaptive.IntegratorLearner`, for when you want to integrate a 1D
  function ``f: ℝ → ℝ``.

Meta-learners (to be used with other learners):

- `~adaptive.BalancingLearner`, for when you want to run several learners at once,
  selecting the “best” one each time you get more points (see the sketch below),
- `~adaptive.DataSaver`, for when your function doesn't just return a scalar or a vector.

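A `~adaptive.BalancingLearner` is driven exactly like a single learner.
A minimal sketch (the peak function and the two offsets are arbitrary
choices for illustration):

.. code:: python

    from functools import partial

    import adaptive

    def f(x, offset):
        a = 0.01
        return x + a**2 / (a**2 + (x - offset)**2)

    # One Learner1D per offset; by default the BalancingLearner samples
    # whichever learner offers the largest expected loss improvement.
    learners = [
        adaptive.Learner1D(partial(f, offset=o), bounds=(-1, 1))
        for o in (-0.5, 0.5)
    ]
    balancer = adaptive.BalancingLearner(learners)
    adaptive.runner.simple(balancer, goal=lambda l: l.loss() < 0.01)
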
In addition to the learners, ``adaptive`` also provides primitives for
running the sampling across several cores and even several machines,
with built-in support for
`concurrent.futures <https://docs.python.org/3/library/concurrent.futures.html>`_,
`ipyparallel <https://ipyparallel.readthedocs.io/en/latest/>`_ and
`distributed <https://distributed.readthedocs.io/en/latest/>`_.

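For example, one way to evaluate the function in parallel processes is
to hand a runner an executor. A minimal sketch (the executor choice,
the number of workers, and the loss goal are arbitrary):

.. code:: python

    from concurrent.futures import ProcessPoolExecutor

    import adaptive

    def f(x):
        return x**3

    learner = adaptive.Learner1D(f, bounds=(-1, 1))

    # BlockingRunner distributes the evaluations of f over the executor's
    # worker processes and returns once the goal is reached; in a live
    # notebook you would use adaptive.Runner instead.
    adaptive.BlockingRunner(
        learner,
        goal=lambda l: l.loss() < 0.01,
        executor=ProcessPoolExecutor(max_workers=4),
    )

In a standalone script, the usual ``if __name__ == "__main__":`` guard
is needed on platforms that spawn worker processes rather than fork them.
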
Examples
--------

Here are some examples of how ``adaptive`` samples compared with homogeneous
sampling. Click on the *Play* :fa:`play` button or move the sliders.

.. jupyter-execute::
    :hide-code:

    import itertools
    import adaptive
    from adaptive.learner.learner1D import uniform_loss, default_loss
    import holoviews as hv
    import numpy as np

    adaptive.notebook_extension()
    hv.output(holomap="scrubber")

`adaptive.Learner1D`
~~~~~~~~~~~~~~~~~~~~

.. jupyter-execute::
    :hide-code:

    def f(x, offset=0.07357338543088588):
        a = 0.01
        return x + a**2 / (a**2 + (x - offset)**2)

    def plot_loss_interval(learner):
        if learner.npoints >= 2:
            x_0, x_1 = max(learner.losses, key=learner.losses.get)
            y_0, y_1 = learner.data[x_0], learner.data[x_1]
            x, y = [x_0, x_1], [y_0, y_1]
        else:
            x, y = [], []
        return hv.Scatter((x, y)).opts(style=dict(size=6, color="r"))

    def plot(learner, npoints):
        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
        return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1]

    def get_hm(loss_per_interval, N=101):
        learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=loss_per_interval)
        plots = {n: plot(learner, n) for n in range(N)}
        return hv.HoloMap(plots, kdims=["npoints"])

    layout = (
        get_hm(uniform_loss).relabel("homogeneous sampling")
        + get_hm(default_loss).relabel("with adaptive")
    )

    layout.opts(plot=dict(toolbar=None))

`adaptive.Learner2D`
~~~~~~~~~~~~~~~~~~~~

.. jupyter-execute::
    :hide-code:

    def ring(xy):
        import numpy as np
        x, y = xy
        a = 0.2
        return x + np.exp(-(x**2 + y**2 - 0.75**2)**2 / a**4)

    def plot(learner, npoints):
        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
        learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)
        xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5))
        xys = list(itertools.product(xs, ys))
        learner2.tell_many(xys, map(ring, xys))
        return (learner2.plot().relabel('homogeneous grid')
                + learner.plot().relabel('with adaptive')
                + learner2.plot(tri_alpha=0.5).relabel('homogeneous grid')
                + learner.plot(tri_alpha=0.5).relabel('with adaptive')).cols(2)

    learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
    plots = {n: plot(learner, n) for n in range(4, 1010, 20)}
    hv.HoloMap(plots, kdims=['npoints']).collate()

`adaptive.AverageLearner`
~~~~~~~~~~~~~~~~~~~~~~~~~

.. jupyter-execute::
    :hide-code:

    def g(n):
        import random
        random.seed(n)
        val = random.gauss(0.5, 0.5)
        return val

    learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)

    def plot(learner, npoints):
        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
        return learner.plot().relabel(f'loss={learner.loss():.2f}')

    plots = {n: plot(learner, n) for n in range(10, 10000, 200)}
    hv.HoloMap(plots, kdims=['npoints'])

`adaptive.LearnerND`
~~~~~~~~~~~~~~~~~~~~

.. jupyter-execute::
    :hide-code:

    def sphere(xyz):
        import numpy as np
        x, y, z = xyz
        a = 0.4
        return np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2 / a**4)

    learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])
    adaptive.runner.simple(learner, lambda l: l.npoints == 5000)

    fig = learner.plot_3D(return_fig=True)

    # Remove a slice from the plot to show the inside of the sphere
    scatter = fig.data[0]
    coords_col = [
        (x, y, z, color)
        for x, y, z, color in zip(
            scatter["x"], scatter["y"], scatter["z"], scatter.marker["color"]
        )
        if not (x > 0 and y > 0)
    ]
    scatter["x"], scatter["y"], scatter["z"], scatter.marker["color"] = zip(*coords_col)

    fig

See more in the :ref:`Tutorial Adaptive`.

.. include:: ../../README.rst
    :start-after: not-in-documentation-end