iommu/vtd: Remove iommu_alloc_pages_node()

Intel is the only remaining user of iommu_alloc_pages_node(), so convert it to
the size-based versions, trying to avoid open-coded PAGE_SHIFT arithmetic.
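
As a reference for the conversion pattern, here is a standalone sketch (not
part of the patch; assumes 4K pages, i.e. PAGE_SHIFT == 12) of the
order-to-size mapping, matching the removed helper's 1 << (order + PAGE_SHIFT):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4K pages */

    int main(void)
    {
            /* order 4 -> 65536 bytes (PRQ), order 8 -> 1048576 bytes (IR table) */
            for (unsigned int order = 0; order <= 8; order++)
                    printf("order %u -> %lu bytes\n", order,
                           1UL << (order + PAGE_SHIFT));
            return 0;
    }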

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/23-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 249d3327f0 (parent c3b42b6ffa)
Author:    Jason Gunthorpe
Date:      2025-04-08 13:54:11 -03:00
Committer: Joerg Roedel

5 changed files with 11 additions and 26 deletions

drivers/iommu/intel/iommu.h

@@ -493,14 +493,13 @@ struct q_inval {
 /* Page Request Queue depth */
 #define PRQ_ORDER	4
-#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
+#define PRQ_DEPTH	(PRQ_SIZE >> 5)
 
 struct dmar_pci_notify_info;
 
 #ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER	8
 #define INTR_REMAP_TABLE_REG_SIZE	0xf
 #define INTR_REMAP_TABLE_REG_SIZE_MASK	0xf
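
The new macros are value-identical to the old ones; a standalone compile-time
check (SZ_4K is defined locally here because in-kernel it comes from
<linux/sizes.h>):

    #include <assert.h>

    #define SZ_4K           0x1000UL
    #define PRQ_ORDER       4
    #define PRQ_SIZE        (SZ_4K << PRQ_ORDER)

    static_assert(PRQ_SIZE == (0x1000UL << PRQ_ORDER), "size unchanged");
    static_assert(PRQ_SIZE - 0x20 == (0x1000UL << PRQ_ORDER) - 0x20, "mask unchanged");
    static_assert(PRQ_SIZE >> 5 == (0x1000UL << PRQ_ORDER) >> 5, "depth unchanged");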

drivers/iommu/intel/irq_remapping.c

@@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	if (!ir_table)
 		return -ENOMEM;
 
-	ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
-					       INTR_REMAP_PAGE_ORDER);
+	/* 1MB - maximum possible interrupt remapping table size */
+	ir_table_base =
+		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
 	if (!ir_table_base) {
-		pr_err("IR%d: failed to allocate pages of order %d\n",
-		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
 		goto out_free_table;
 	}
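
SZ_1M is exactly the old order-8 allocation on 4K-page systems (2^8 pages of
4 KiB). A standalone check of that arithmetic, with the removed define
reproduced locally:

    #include <assert.h>

    #define PAGE_SHIFT              12  /* assumption: 4K pages */
    #define INTR_REMAP_PAGE_ORDER   8   /* the define this patch removes */
    #define SZ_1M                   0x100000UL

    static_assert(SZ_1M == 1UL << (INTR_REMAP_PAGE_ORDER + PAGE_SHIFT),
                  "1M == 256 pages of 4K");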

drivers/iommu/intel/pasid.c

@@ -60,7 +60,8 @@ int intel_pasid_alloc_table(struct device *dev)
 	size = max_pasid >> (PASID_PDE_SHIFT - 3);
 	order = size ? get_order(size) : 0;
-	dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+	dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+					1 << (order + PAGE_SHIFT));
 	if (!dir) {
 		kfree(pasid_table);
 		return -ENOMEM;
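
Here the byte count is recovered from the order rather than computed directly,
preserving the old round-up-to-a-power-of-two-pages behaviour. A standalone
illustration (the get_order() stand-in and the size value are assumptions, not
from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4K pages */

    /* Minimal stand-in for the kernel's get_order(): the smallest order
     * whose page span covers size. */
    static unsigned int get_order(unsigned long size)
    {
            unsigned int order = 0;

            while (1UL << (order + PAGE_SHIFT) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long size = 6000;      /* illustrative directory size */
            unsigned int order = get_order(size);

            /* 6000 bytes -> order 1 -> 8192 bytes actually allocated */
            printf("size %lu -> order %u -> %lu bytes\n",
                   size, order, 1UL << (order + PAGE_SHIFT));
            return 0;
    }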

drivers/iommu/intel/prq.c

@@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu)
 	struct iopf_queue *iopfq;
 	int irq, ret;
 
-	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+	iommu->prq =
+		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
 	if (!iommu->prq) {
 		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
 			iommu->name);
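
PRQ_ORDER itself survives in iommu.h because the hardware takes the queue size
as an order: the PQA register programming elsewhere in the driver (not part of
this hunk; quoted from the existing code as an assumption) encodes it directly:

    dmar_writeq(iommu->reg + DMAR_PQA_REG,
                virt_to_phys(iommu->prq) | PRQ_ORDER);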

drivers/iommu/iommu-pages.h

@@ -84,22 +84,6 @@ static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
 	return list_empty(&list->pages);
 }
 
-/**
- * iommu_alloc_pages_node - Allocate a zeroed page of a given order from
- *			    specific NUMA node
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * Returns the virtual address of the allocated page.
- * Prefer to use iommu_alloc_pages_node_lg2()
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp,
-					   unsigned int order)
-{
-	return iommu_alloc_pages_node_sz(nid, gfp, 1 << (order + PAGE_SHIFT));
-}
-
 /**
  * iommu_alloc_pages_sz - Allocate a zeroed page of a given size from
  *			  specific NUMA node