Skip to content

Commit 199df20

Browse files
ryanhrob1 authored and Naim committed
vmalloc: Optimize vfree with free_pages_bulk()
Whenever vmalloc allocates high order pages (e.g. for a huge mapping) it must immediately split_page() to order-0 so that it remains compatible with users that want to access the underlying struct page. Commit a061578 ("mm/vmalloc: request large order pages from buddy allocator") recently made it much more likely for vmalloc to allocate high order pages which are subsequently split to order-0. Unfortunately this had the side effect of causing performance regressions for tight vmalloc/vfree loops (e.g. test_vmalloc.ko benchmarks). See Closes: tag. This happens because the high order pages must be gotten from the buddy but then because they are split to order-0, when they are freed they are freed to the order-0 pcp. Previously allocation was for order-0 pages so they were recycled from the pcp. It would be preferable if when vmalloc allocates an (e.g.) order-3 page that it also frees that order-3 page to the order-3 pcp, then the regression could be removed. So let's do exactly that; update stats separately first as coalescing is hard to do correctly without complexity. Use free_pages_bulk() which uses the new __free_contig_range() API to batch-free contiguous ranges of pfns. This not only removes the regression, but significantly improves performance of vfree beyond the baseline. A selection of test_vmalloc benchmarks running on arm64 server class system. mm-new is the baseline. Commit a061578 ("mm/vmalloc: request large order pages from buddy allocator") was added in v6.19-rc1 where we see regressions. Then with this change performance is much better. 
(>0 is faster, <0 is slower, (R)/(I) = statistically significant Regression/Improvement):

+-----------------+----------------------------------------------------------+-------------------+--------------------+
| Benchmark       | Result Class                                             | mm-new            | this series        |
+=================+==========================================================+===================+====================+
| micromm/vmalloc | fix_align_alloc_test: p:1, h:0, l:500000 (usec)          | 1331843.33        | (I) 67.17%         |
|                 | fix_size_alloc_test: p:1, h:0, l:500000 (usec)           | 415907.33         | -5.14%             |
|                 | fix_size_alloc_test: p:4, h:0, l:500000 (usec)           | 755448.00         | (I) 53.55%         |
|                 | fix_size_alloc_test: p:16, h:0, l:500000 (usec)          | 1591331.33        | (I) 57.26%         |
|                 | fix_size_alloc_test: p:16, h:1, l:500000 (usec)          | 1594345.67        | (I) 68.46%         |
|                 | fix_size_alloc_test: p:64, h:0, l:100000 (usec)          | 1071826.00        | (I) 79.27%         |
|                 | fix_size_alloc_test: p:64, h:1, l:100000 (usec)          | 1018385.00        | (I) 84.17%         |
|                 | fix_size_alloc_test: p:256, h:0, l:100000 (usec)         | 3970899.67        | (I) 77.01%         |
|                 | fix_size_alloc_test: p:256, h:1, l:100000 (usec)         | 3821788.67        | (I) 89.44%         |
|                 | fix_size_alloc_test: p:512, h:0, l:100000 (usec)         | 7795968.00        | (I) 82.67%         |
|                 | fix_size_alloc_test: p:512, h:1, l:100000 (usec)         | 6530169.67        | (I) 118.09%        |
|                 | full_fit_alloc_test: p:1, h:0, l:500000 (usec)           | 626808.33         | -0.98%             |
|                 | kvfree_rcu_1_arg_vmalloc_test: p:1, h:0, l:500000 (usec) | 532145.67         | -1.68%             |
|                 | kvfree_rcu_2_arg_vmalloc_test: p:1, h:0, l:500000 (usec) | 537032.67         | -0.96%             |
|                 | long_busy_list_alloc_test: p:1, h:0, l:500000 (usec)     | 8805069.00        | (I) 74.58%         |
|                 | pcpu_alloc_test: p:1, h:0, l:500000 (usec)               | 500824.67         | 4.35%              |
|                 | random_size_align_alloc_test: p:1, h:0, l:500000 (usec)  | 1637554.67        | (I) 76.99%         |
|                 | random_size_alloc_test: p:1, h:0, l:500000 (usec)        | 4556288.67        | (I) 72.23%         |
|                 | vm_map_ram_test: p:1, h:0, l:500000 (usec)               | 107371.00         | -0.70%             |
+-----------------+----------------------------------------------------------+-------------------+--------------------+

Fixes: a061578 ("mm/vmalloc: request large order pages from buddy allocator")
Closes: https://lore.kernel.org/all/66919a28-bc81-49c9-b68f-dd7c73395a0d@arm.com/
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Co-developed-by: Muhammad Usama Anjum <usama.anjum@arm.com>
Signed-off-by: Muhammad Usama Anjum <usama.anjum@arm.com>
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
1 parent 4cfc9ec commit 199df20

3 files changed

Lines changed: 31 additions & 12 deletions

File tree

include/linux/gfp.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -239,6 +239,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
239239
struct page **page_array);
240240
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
241241

242+
void free_pages_bulk(struct page **page_array, unsigned long nr_pages);
243+
242244
unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
243245
unsigned long nr_pages,
244246
struct page **page_array);

mm/page_alloc.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5214,6 +5214,34 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
52145214
}
52155215
EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
52165216

5217+
/*
5218+
* free_pages_bulk - Free an array of order-0 pages
5219+
* @page_array: Array of pages to free
5220+
* @nr_pages: The number of pages in the array
5221+
*
5222+
* Free the order-0 pages. Adjacent entries whose PFNs form a contiguous
5223+
* run are released with a single __free_contig_range() call.
5224+
*
5225+
* This assumes page_array is sorted in ascending PFN order. Without that,
5226+
* the function still frees all pages, but contiguous runs may not be
5227+
* detected and the freeing pattern can degrade to freeing one page at a
5228+
* time.
5229+
*
5230+
* Context: Sleepable process context only; calls cond_resched()
5231+
*/
5232+
void free_pages_bulk(struct page **page_array, unsigned long nr_pages)
5233+
{
5234+
while (nr_pages) {
5235+
unsigned long nr_contig = num_pages_contiguous(page_array, nr_pages);
5236+
5237+
__free_contig_range(page_to_pfn(*page_array), nr_contig);
5238+
5239+
nr_pages -= nr_contig;
5240+
page_array += nr_contig;
5241+
cond_resched();
5242+
}
5243+
}
5244+
52175245
/*
52185246
* This is the 'heart' of the zoned buddy allocator.
52195247
*/

mm/vmalloc.c

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -3442,7 +3442,6 @@ void vfree_atomic(const void *addr)
34423442
void vfree(const void *addr)
34433443
{
34443444
struct vm_struct *vm;
3445-
int i;
34463445

34473446
if (unlikely(in_interrupt())) {
34483447
vfree_atomic(addr);
@@ -3468,17 +3467,7 @@ void vfree(const void *addr)
34683467
/* All pages of vm should be charged to same memcg, so use first one. */
34693468
if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES))
34703469
mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages);
3471-
for (i = 0; i < vm->nr_pages; i++) {
3472-
struct page *page = vm->pages[i];
3473-
3474-
BUG_ON(!page);
3475-
/*
3476-
* High-order allocs for huge vmallocs are split, so
3477-
* can be freed as an array of order-0 allocations
3478-
*/
3479-
__free_page(page);
3480-
cond_resched();
3481-
}
3470+
free_pages_bulk(vm->pages, vm->nr_pages);
34823471
if (!(vm->flags & VM_MAP_PUT_PAGES))
34833472
atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
34843473
kvfree(vm->pages);

0 commit comments

Comments
 (0)