mm/huge_memory: move folio split common code to __folio_split()
This is a preparation patch for folio_split(). In the upcoming patch,
folio_split() will share the folio unmapping and remapping code with
split_huge_page_to_list_to_order(), so move that code into a common
function __folio_split() first.

Add a TODO for splitting large shmem folios in swap cache.

Link: https://lkml.kernel.org/r/20250307174001.242794-4-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Kairui Song <kasong@tencent.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
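The net effect, distilled from the diff below: split_huge_page_to_list_to_order()
becomes a thin wrapper that resolves the folio from @page and delegates to the
new helper, which the upcoming folio_split() can then share. A sketch of the
resulting call shape (helper body elided; see the full diff for the real code):

    static int __folio_split(struct folio *folio, unsigned int new_order,
                    struct page *page, struct list_head *list)
    {
            /* unmapping/remapping logic moved here, otherwise unchanged */
            ...
    }

    int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                                         unsigned int new_order)
    {
            struct folio *folio = page_folio(page);

            return __folio_split(folio, new_order, page, list);
    }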
 mm/huge_memory.c | 112 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 62 insertions(+), 50 deletions(-)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3869,57 +3869,9 @@ after_split:
 	return ret;
 }
 
-/*
- * This function splits a large folio into smaller folios of order @new_order.
- * @page can point to any page of the large folio to split. The split operation
- * does not change the position of @page.
- *
- * Prerequisites:
- *
- * 1) The caller must hold a reference on the @page's owning folio, also known
- *    as the large folio.
- *
- * 2) The large folio must be locked.
- *
- * 3) The folio must not be pinned. Any unexpected folio references, including
- *    GUP pins, will result in the folio not getting split; instead, the caller
- *    will receive an -EAGAIN.
- *
- * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
- *    supported for non-file-backed folios, because folio->_deferred_list, which
- *    is used by partially mapped folios, is stored in subpage 2, but an order-1
- *    folio only has subpages 0 and 1. File-backed order-1 folios are supported,
- *    since they do not use _deferred_list.
- *
- * After splitting, the caller's folio reference will be transferred to @page,
- * resulting in a raised refcount of @page after this call. The other pages may
- * be freed if they are not mapped.
- *
- * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
- *
- * Pages in @new_order will inherit the mapping, flags, and so on from the
- * huge page.
- *
- * Returns 0 if the huge page was split successfully.
- *
- * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
- * the folio was concurrently removed from the page cache.
- *
- * Returns -EBUSY when trying to split the huge zeropage, if the folio is
- * under writeback, if fs-specific folio metadata cannot currently be
- * released, or if some unexpected race happened (e.g., anon VMA disappeared,
- * truncation).
- *
- * Callers should ensure that the order respects the address space mapping
- * min-order if one is set for non-anonymous folios.
- *
- * Returns -EINVAL when trying to split to an order that is incompatible
- * with the folio. Splitting to order 0 is compatible with all folios.
- */
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
-				     unsigned int new_order)
+static int __folio_split(struct folio *folio, unsigned int new_order,
+		struct page *page, struct list_head *list)
 {
-	struct folio *folio = page_folio(page);
 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 	/* reset xarray order to new order after split */
 	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
@@ -3995,6 +3947,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		mapping = folio->mapping;
 
 		/* Truncated ? */
+		/*
+		 * TODO: add support for large shmem folio in swap cache.
+		 * When shmem is in swap cache, mapping is NULL and
+		 * folio_test_swapcache() is true.
+		 */
 		if (!mapping) {
 			ret = -EBUSY;
 			goto out;
@@ -4129,6 +4086,61 @@ out:
 	return ret;
 }
 
+/*
+ * This function splits a large folio into smaller folios of order @new_order.
+ * @page can point to any page of the large folio to split. The split operation
+ * does not change the position of @page.
+ *
+ * Prerequisites:
+ *
+ * 1) The caller must hold a reference on the @page's owning folio, also known
+ *    as the large folio.
+ *
+ * 2) The large folio must be locked.
+ *
+ * 3) The folio must not be pinned. Any unexpected folio references, including
+ *    GUP pins, will result in the folio not getting split; instead, the caller
+ *    will receive an -EAGAIN.
+ *
+ * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
+ *    supported for non-file-backed folios, because folio->_deferred_list, which
+ *    is used by partially mapped folios, is stored in subpage 2, but an order-1
+ *    folio only has subpages 0 and 1. File-backed order-1 folios are supported,
+ *    since they do not use _deferred_list.
+ *
+ * After splitting, the caller's folio reference will be transferred to @page,
+ * resulting in a raised refcount of @page after this call. The other pages may
+ * be freed if they are not mapped.
+ *
+ * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
+ *
+ * Pages in @new_order will inherit the mapping, flags, and so on from the
+ * huge page.
+ *
+ * Returns 0 if the huge page was split successfully.
+ *
+ * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
+ * the folio was concurrently removed from the page cache.
+ *
+ * Returns -EBUSY when trying to split the huge zeropage, if the folio is
+ * under writeback, if fs-specific folio metadata cannot currently be
+ * released, or if some unexpected race happened (e.g., anon VMA disappeared,
+ * truncation).
+ *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set for non-anonymous folios.
+ *
+ * Returns -EINVAL when trying to split to an order that is incompatible
+ * with the folio. Splitting to order 0 is compatible with all folios.
+ */
+int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+				     unsigned int new_order)
+{
+	struct folio *folio = page_folio(page);
+
+	return __folio_split(folio, new_order, page, list);
+}
+
 int min_order_for_split(struct folio *folio)
 {
 	if (folio_test_anon(folio))