Skip to content

Commit cea5692

Browse files
committed
Revert "mm/page_alloc: Optimize free_contig_range()"
This reverts commit 8214813.

Signed-off-by: Eric Naim <dnaim@cachyos.org>
1 parent a2873f1 commit cea5692

2 files changed

Lines changed: 2 additions & 103 deletions

File tree

include/linux/gfp.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -464,8 +464,6 @@ void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages);
464464
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
465465
#endif
466466

467-
void __free_contig_range(unsigned long pfn, unsigned long nr_pages);
468-
469467
DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
470468

471469
#endif /* __LINUX_GFP_H */

mm/page_alloc.c

Lines changed: 2 additions & 101 deletions
Original file line numberDiff line numberDiff line change
@@ -90,9 +90,6 @@ typedef int __bitwise fpi_t;
9090
/* Free the page without taking locks. Rely on trylock only. */
9191
#define FPI_TRYLOCK ((__force fpi_t)BIT(2))
9292

93-
/* free_pages_prepare() has already been called for page(s) being freed. */
94-
#define FPI_PREPARED ((__force fpi_t)BIT(3))
95-
9693
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
9794
static DEFINE_MUTEX(pcp_batch_high_lock);
9895
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
@@ -1351,9 +1348,6 @@ __always_inline bool __free_pages_prepare(struct page *page,
13511348
bool compound = PageCompound(page);
13521349
struct folio *folio = page_folio(page);
13531350

1354-
if (fpi_flags & FPI_PREPARED)
1355-
return true;
1356-
13571351
VM_BUG_ON_PAGE(PageTail(page), page);
13581352

13591353
trace_mm_page_free(page, order);
@@ -6830,100 +6824,6 @@ void __init page_alloc_sysctl_init(void)
68306824
register_sysctl_init("vm", page_alloc_sysctl_table);
68316825
}
68326826

6833-
static void free_prepared_contig_range(struct page *page,
6834-
unsigned long nr_pages)
6835-
{
6836-
while (nr_pages) {
6837-
unsigned int order;
6838-
unsigned long pfn;
6839-
6840-
pfn = page_to_pfn(page);
6841-
/* We are limited by the largest buddy order. */
6842-
order = pfn ? __ffs(pfn) : MAX_PAGE_ORDER;
6843-
/* Don't exceed the number of pages to free. */
6844-
order = min_t(unsigned int, order, ilog2(nr_pages));
6845-
order = min_t(unsigned int, order, MAX_PAGE_ORDER);
6846-
6847-
/*
6848-
* Free the chunk as a single block. Our caller has already
6849-
* called free_pages_prepare() for each order-0 page.
6850-
*/
6851-
__free_frozen_pages(page, order, FPI_PREPARED);
6852-
6853-
page += 1UL << order;
6854-
nr_pages -= 1UL << order;
6855-
}
6856-
}
6857-
6858-
static void __free_contig_range_common(unsigned long pfn, unsigned long nr_pages,
6859-
bool is_frozen)
6860-
{
6861-
struct page *page = pfn_to_page(pfn);
6862-
struct page *start = NULL;
6863-
unsigned long start_sec;
6864-
bool can_free = true;
6865-
unsigned long i;
6866-
6867-
/*
6868-
* Contiguous PFNs might not have a contiguous "struct pages" in some
6869-
* kernel config. Therefore, check memdesc_section(), and stop batching
6870-
* once it changes, see num_pages_contiguous().
6871-
*/
6872-
for (i = 0; i < nr_pages; i++, page++) {
6873-
VM_WARN_ON_ONCE(PageHead(page));
6874-
VM_WARN_ON_ONCE(PageTail(page));
6875-
6876-
if (!is_frozen)
6877-
can_free = put_page_testzero(page);
6878-
6879-
if (can_free)
6880-
can_free = free_pages_prepare(page, 0);
6881-
6882-
if (!can_free) {
6883-
if (start) {
6884-
free_prepared_contig_range(start, page - start);
6885-
start = NULL;
6886-
}
6887-
continue;
6888-
}
6889-
6890-
if (start && memdesc_section(page->flags) != start_sec) {
6891-
free_prepared_contig_range(start, page - start);
6892-
start = page;
6893-
start_sec = memdesc_section(page->flags);
6894-
} else if (!start) {
6895-
start = page;
6896-
start_sec = memdesc_section(page->flags);
6897-
}
6898-
}
6899-
6900-
if (start)
6901-
free_prepared_contig_range(start, page - start);
6902-
}
6903-
6904-
/**
6905-
* __free_contig_range - Free contiguous range of order-0 pages.
6906-
* @pfn: Page frame number of the first page in the range.
6907-
* @nr_pages: Number of pages to free.
6908-
*
6909-
* For each order-0 struct page in the physically contiguous range, put a
6910-
* reference. Free any page whose reference count falls to zero. The
6911-
* implementation is functionally equivalent to, but significantly faster than
6912-
* calling __free_page() for each struct page in a loop.
6913-
*
6914-
* Memory allocated with alloc_pages(order>=1) then subsequently split to
6915-
* order-0 with split_page() is an example of appropriate contiguous pages that
6916-
* can be freed with this API.
6917-
*
6918-
* Context: May be called in interrupt context or while holding a normal
6919-
* spinlock, but not in NMI context or while holding a raw spinlock.
6920-
*/
6921-
void __free_contig_range(unsigned long pfn, unsigned long nr_pages)
6922-
{
6923-
__free_contig_range_common(pfn, nr_pages, false);
6924-
}
6925-
EXPORT_SYMBOL(__free_contig_range);
6926-
69276827
#ifdef CONFIG_CONTIG_ALLOC
69286828
/* Usage: See admin-guide/dynamic-debug-howto.rst */
69296829
static void alloc_contig_dump_pages(struct list_head *page_list)
@@ -7469,7 +7369,8 @@ void free_contig_range(unsigned long pfn, unsigned long nr_pages)
74697369
if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
74707370
return;
74717371

7472-
__free_contig_range(pfn, nr_pages);
7372+
for (; nr_pages--; pfn++)
7373+
__free_page(pfn_to_page(pfn));
74737374
}
74747375
EXPORT_SYMBOL(free_contig_range);
74757376
#endif /* CONFIG_CONTIG_ALLOC */

0 commit comments

Comments
 (0)