x86/mm/pat: remove old pfnmap tracking interface

All remaining users have been converted to pfnmap_track() and
pfnmap_untrack(), so we can now get rid of the old interface
(track_pfn_remap(), track_pfn_copy(), untrack_pfn_copy(), untrack_pfn()
and untrack_pfn_clear()) along with its get_pat_info() and follow_phys()
helpers.
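
For reference, the work that track_pfn_remap() used to bundle stays
reachable through the interface we keep: pfnmap_track() reserves a range
and adjusts the cachemode in the passed pgprot, pfnmap_setup_cachemode()
only looks up the cachemode without taking a reservation, and
pfnmap_untrack() drops a reservation again. Below is a minimal,
hypothetical caller sketch; the example_* helpers are made up for
illustration, only the pfnmap_*() signatures come from <linux/pgtable.h>:

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative only: roughly what a remap-style caller does now that
 * track_pfn_remap() is gone.  The example_* names are not kernel code.
 */
static int example_prepare_pfnmap(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long addr,
				   unsigned long size, pgprot_t *prot)
{
	/* Whole-VMA mapping: reserve the range and adopt its cachemode. */
	if (addr == vma->vm_start && size == vma->vm_end - vma->vm_start)
		return pfnmap_track(pfn, size, prot);

	/* Partial mapping: only look up the cachemode, no reservation. */
	return pfnmap_setup_cachemode(pfn, size, prot);
}

static void example_teardown_pfnmap(unsigned long pfn, unsigned long size)
{
	/* Undo a successful pfnmap_track(). */
	pfnmap_untrack(pfn, size);
}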

Link: https://lkml.kernel.org/r/20250512123424.637989-6-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Ingo Molnar <mingo@kernel.org>	[x86 bits]
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Airlie <airlied@gmail.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -933,119 +933,6 @@ static void free_pfn_range(u64 paddr, unsigned long size)
memtype_free(paddr, paddr + size);
}
static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
resource_size_t *phys)
{
struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start };
if (follow_pfnmap_start(&args))
return -EINVAL;
/* Never return PFNs of anon folios in COW mappings. */
if (!args.special) {
follow_pfnmap_end(&args);
return -EINVAL;
}
*prot = pgprot_val(args.pgprot);
*phys = (resource_size_t)args.pfn << PAGE_SHIFT;
follow_pfnmap_end(&args);
return 0;
}
static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
pgprot_t *pgprot)
{
unsigned long prot;
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
/*
* We need the starting PFN and cachemode used for track_pfn_remap()
* that covered the whole VMA. For most mappings, we can obtain that
* information from the page tables. For COW mappings, we might now
* suddenly have anon folios mapped and follow_phys() will fail.
*
* Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
* detect the PFN. If we need the cachemode as well, we're out of luck
* for now and have to fail fork().
*/
if (!follow_phys(vma, &prot, paddr)) {
if (pgprot)
*pgprot = __pgprot(prot);
return 0;
}
if (is_cow_mapping(vma->vm_flags)) {
if (pgprot)
return -EINVAL;
*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
return 0;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
int track_pfn_copy(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long *pfn)
{
const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;
resource_size_t paddr;
pgprot_t pgprot;
int rc;
if (!(src_vma->vm_flags & VM_PAT))
return 0;
/*
* Duplicate the PAT information for the dst VMA based on the src
* VMA.
*/
if (get_pat_info(src_vma, &paddr, &pgprot))
return -EINVAL;
rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
if (rc)
return rc;
/* Reservation for the destination VMA succeeded. */
vm_flags_set(dst_vma, VM_PAT);
*pfn = PHYS_PFN(paddr);
return 0;
}
void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn)
{
untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true);
/*
* Reservation was freed, any copied page tables will get cleaned
* up later, but without getting PAT involved again.
*/
}
/*
* prot is passed in as a parameter for the new mapping. If the vma has
* a linear pfn mapping for the entire range, or no vma is provided,
* reserve the entire pfn + size range with single reserve_pfn_range
* call.
*/
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr, unsigned long size)
{
resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
/* reserve the whole chunk starting from paddr */
if (!vma || (addr == vma->vm_start
&& size == (vma->vm_end - vma->vm_start))) {
int ret;
ret = reserve_pfn_range(paddr, size, prot, 0);
if (ret == 0 && vma)
vm_flags_set(vma, VM_PAT);
return ret;
}
return pfnmap_setup_cachemode(pfn, size, prot);
}
int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot)
{
resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
@@ -1082,40 +969,6 @@ void pfnmap_untrack(unsigned long pfn, unsigned long size)
free_pfn_range(paddr, size);
}
/*
* untrack_pfn is called while unmapping a pfnmap for a region.
* untrack can be called for a specific region indicated by pfn and size or
* can be for the entire vma (in which case pfn, size are zero).
*/
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size, bool mm_wr_locked)
{
resource_size_t paddr;
if (vma && !(vma->vm_flags & VM_PAT))
return;
/* free the chunk starting from pfn or the whole chunk */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
if (!paddr && !size) {
if (get_pat_info(vma, &paddr, NULL))
return;
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
if (vma) {
if (mm_wr_locked)
vm_flags_clear(vma, VM_PAT);
else
__vm_flags_mod(vma, 0, VM_PAT);
}
}
void untrack_pfn_clear(struct vm_area_struct *vma)
{
vm_flags_clear(vma, VM_PAT);
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WC);

@@ -1485,17 +1485,6 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
* vmf_insert_pfn.
*/
/*
* track_pfn_remap is called when a _new_ pfn mapping is being established
* by remap_pfn_range() for physical range indicated by pfn and size.
*/
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr,
unsigned long size)
{
return 0;
}
static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
pgprot_t *prot)
{
@@ -1511,55 +1500,7 @@ static inline int pfnmap_track(unsigned long pfn, unsigned long size,
static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
}
/*
* track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page
* tables copied during copy_page_range(). Will store the pfn to be
* passed to untrack_pfn_copy() only if there is something to be untracked.
* Callers should initialize the pfn to 0.
*/
static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long *pfn)
{
return 0;
}
/*
* untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during
* copy_page_range(), but after track_pfn_copy() was already called. Can
* be called even if track_pfn_copy() did not actually track anything:
* handled internally.
*/
static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma,
unsigned long pfn)
{
}
/*
* untrack_pfn is called while unmapping a pfnmap for a region.
* untrack can be called for a specific region indicated by pfn and size or
* can be for the entire vma (in which case pfn, size are zero).
*/
static inline void untrack_pfn(struct vm_area_struct *vma,
unsigned long pfn, unsigned long size,
bool mm_wr_locked)
{
}
/*
* untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA:
*
* 1) During mremap() on the src VMA after the page tables were moved.
* 2) During fork() on the dst VMA, immediately after duplicating the src VMA.
*/
static inline void untrack_pfn_clear(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr,
unsigned long size);
/**
* pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
* @pfn: the start of the pfn range
@@ -1614,13 +1555,6 @@ int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
* Untrack a pfn range previously tracked through pfnmap_track().
*/
void pfnmap_untrack(unsigned long pfn, unsigned long size);
extern int track_pfn_copy(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long *pfn);
extern void untrack_pfn_copy(struct vm_area_struct *dst_vma,
unsigned long pfn);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size, bool mm_wr_locked);
extern void untrack_pfn_clear(struct vm_area_struct *vma);
#endif
/**