Implemented algorithms
----------------------

The core concept in ``adaptive`` is that of a *learner*. A *learner*
samples a function at the best places in its parameter space to get
maximum “information” about the function. As it evaluates the function
at more and more points in the parameter space, it gets a better idea of
where the best places are to sample next.

Of course, what qualifies as the “best places” will depend on your
application domain! ``adaptive`` makes some reasonable default choices,
but the details of the adaptive sampling are completely customizable.

The following learners are implemented:

- `~adaptive.Learner1D`, for 1D functions ``f: ℝ → ℝ^N``,
- `~adaptive.Learner2D`, for 2D functions ``f: ℝ^2 → ℝ^N``,
- `~adaptive.LearnerND`, for ND functions ``f: ℝ^N → ℝ^M``,
- `~adaptive.AverageLearner`, for stochastic functions where you want to
  average the result over many evaluations,
- `~adaptive.IntegratorLearner`, for when you want to integrate
  a 1D function ``f: ℝ → ℝ``,
- `~adaptive.BalancingLearner`, for when you want to run several learners at once,
  selecting the “best” one each time you get more points.
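
To give a feel for the API, here is a minimal sketch of driving a
`~adaptive.Learner1D` to a target accuracy with the blocking ``simple``
runner (the function ``peak`` is a hypothetical example, not part of
``adaptive``):

.. code:: python

    import adaptive

    def peak(x):
        # A sharp Lorentzian peak on a linear background; hard to
        # resolve well with homogeneous sampling.
        a = 0.01
        return x + a**2 / (a**2 + x**2)

    learner = adaptive.Learner1D(peak, bounds=(-1, 1))
    # Keep evaluating ``peak`` at adaptively chosen points until the
    # loss is small enough.
    adaptive.runner.simple(learner, lambda l: l.loss() < 0.01)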

In addition to the learners, ``adaptive`` also provides primitives for
running the sampling across several cores and even several machines,
with built-in support for
`concurrent.futures <https://docs.python.org/3/library/concurrent.futures.html>`_,
`ipyparallel <https://ipyparallel.readthedocs.io/en/latest/>`_ and
`distributed <https://distributed.readthedocs.io/en/latest/>`_.
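
For example, a sketch of running a learner on several cores with a
``concurrent.futures`` executor (reusing the hypothetical ``peak`` function
from above; the goal and the number of workers are just illustrative):

.. code:: python

    from concurrent.futures import ProcessPoolExecutor

    import adaptive

    # ``peak`` must live in an importable module so that the worker
    # processes can unpickle it.
    learner = adaptive.Learner1D(peak, bounds=(-1, 1))
    # The Runner evaluates ``peak`` in four worker processes; in a
    # Jupyter notebook it runs in the background while you work.
    runner = adaptive.Runner(
        learner,
        goal=lambda l: l.loss() < 0.01,
        executor=ProcessPoolExecutor(max_workers=4),
    )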

Examples
--------

Here are some examples of how ``adaptive`` samples a function, compared
with homogeneous sampling. Click on the *Play* :fa:`play` button or move
the sliders.

.. execute::
    :hide-code:

    import itertools
    import adaptive
    from adaptive.learner.learner1D import uniform_loss, default_loss
    import holoviews as hv
    import numpy as np

    # Load the notebook plotting machinery used by ``learner.plot()``.
    adaptive.notebook_extension()
    %output holomap='scrubber'


`adaptive.Learner1D`
~~~~~~~~~~~~~~~~~~~~

.. execute::
    :hide-code:

    %%opts Layout [toolbar=None]
    def f(x, offset=0.07357338543088588):
        a = 0.01
        return x + a**2 / (a**2 + (x - offset)**2)

    def plot_loss_interval(learner):
        # Mark the interval with the largest loss, i.e. the one that
        # will be subdivided next.
        if learner.npoints >= 2:
            x_0, x_1 = max(learner.losses, key=learner.losses.get)
            y_0, y_1 = learner.data[x_0], learner.data[x_1]
            x, y = [x_0, x_1], [y_0, y_1]
        else:
            x, y = [], []
        return hv.Scatter((x, y)).opts(style=dict(size=6, color='r'))

    def plot(learner, npoints):
        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
        return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1]

    def get_hm(loss_per_interval, N=101):
        learner = adaptive.Learner1D(f, bounds=(-1, 1),
                                     loss_per_interval=loss_per_interval)
        plots = {n: plot(learner, n) for n in range(N)}
        return hv.HoloMap(plots, kdims=['npoints'])

    (get_hm(uniform_loss).relabel('homogeneous sampling')
     + get_hm(default_loss).relabel('with adaptive'))
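The two columns above use the built-in losses ``uniform_loss`` and
``default_loss``. A custom loss is just a function that assigns a number to
an interval; a sketch, assuming the two-point ``(xs, ys)`` call signature of
the built-in 1D losses:

.. code:: python

    import numpy as np

    def distance_loss(xs, ys):
        # Length of the line segment spanning the interval: regions
        # where ``f`` varies rapidly get a large loss and are
        # subdivided first.
        return np.hypot(xs[1] - xs[0], ys[1] - ys[0])

    learner = adaptive.Learner1D(f, bounds=(-1, 1),
                                 loss_per_interval=distance_loss)
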

`adaptive.Learner2D`
~~~~~~~~~~~~~~~~~~~~

.. execute::
    :hide-code:

    def ring(xy):
        # Import inside the function so that it stays self-contained
        # when shipped to remote workers.
        import numpy as np
        x, y = xy
        a = 0.2
        return x + np.exp(-(x**2 + y**2 - 0.75**2)**2 / a**4)

    def plot(learner, npoints):
        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
        # Evaluate ``ring`` on a homogeneous grid with the same number
        # of points for comparison.
        learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)
        xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5))
        xys = list(itertools.product(xs, ys))
        learner2.tell_many(xys, map(ring, xys))
        return (learner2.plot().relabel('homogeneous sampling')
                + learner.plot().relabel('with adaptive')
                + learner2.plot(tri_alpha=0.5).relabel('homogeneous sampling')
                + learner.plot(tri_alpha=0.5).relabel('with adaptive')).cols(2)

    learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
    plots = {n: plot(learner, n) for n in range(4, 1010, 20)}
    hv.HoloMap(plots, kdims=['npoints']).collate()
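Outside of these documentation plots, a 2D learner is driven just like the
1D one; a minimal sketch, reusing the ``ring`` function defined above:

.. code:: python

    learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
    adaptive.runner.simple(learner, lambda l: l.loss() < 0.01)
    # Plot the interpolated surface; ``tri_alpha`` overlays the
    # triangulation of the sampled points.
    learner.plot(tri_alpha=0.4)
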
`adaptive.AverageLearner`
~~~~~~~~~~~~~~~~~~~~~~~~~

.. execute::
    :hide-code:

    def g(n):
        # The argument is an integer seed, so every call is an
        # independent but reproducible random sample.
        import random
        random.seed(n)
        return random.gauss(0.5, 0.5)

    learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)

    def plot(learner, npoints):
        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
        return learner.plot().relabel(f'loss={learner.loss():.2f}')

    plots = {n: plot(learner, n) for n in range(10, 10000, 200)}
    hv.HoloMap(plots, kdims=['npoints'])
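In real use the pattern is the same: the learned function receives an
integer seed, and ``atol``/``rtol`` set the requested absolute and relative
tolerance on the estimated mean. A sketch (``noisy`` is a hypothetical
stand-in for an expensive stochastic simulation; here we simply stop after a
fixed number of samples):

.. code:: python

    import random

    def noisy(seed):
        # Seeding makes each sample independent yet reproducible.
        random.seed(seed)
        return random.gauss(0.5, 0.5)

    # Request the mean to 1% relative accuracy.
    learner = adaptive.AverageLearner(noisy, atol=None, rtol=0.01)
    adaptive.runner.simple(learner, lambda l: l.npoints >= 1000)
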
See more examples in the :ref:`Tutorial Adaptive`.


.. include:: ../../README.rst
    :start-after: not-in-documentation-end