eth: fbnic: allocate unreadable page pool for the payloads

Allow allocating a page pool with unreadable memory for the payload
ring (sub1). We need to provide the queue ID so that the memory
provider can match the PP.

Use the appropriate page pool DMA sync helper. For unreadable mem the
direction has to be FROM_DEVICE. The default is BIDIR for XDP, but
obviously unreadable mem is not compatible with XDP in the first
place, so that's fine.

While at it remove the define for page pool flags.

The rxq_idx is passed to fbnic_alloc_rx_qt_resources() explicitly to
make it easy to allocate page pools without NAPI (see the patch after
the next).

Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250901211214.1027927-12-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 8a11010fdd
parent 3ceb08838b
committed by Paolo Abeni
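Before the diff, a note on the DMA sync change in the first hunk. This
is a minimal sketch, not part of the commit: example_rx_sync() is a
hypothetical wrapper added here only to illustrate why the page pool
helper replaces the open-coded dma_sync_single_range_for_cpu(). The
pool knows how (and whether) its buffers were DMA-mapped, which the raw
sync call cannot.

#include <net/page_pool/helpers.h>

/* Hypothetical wrapper, for illustration only. */
static void example_rx_sync(struct page_pool *pool, netmem_ref netmem,
                            u32 pg_off, u32 truesize)
{
        /* For regular pages this performs the same CPU-direction DMA
         * sync the driver used to open code; for memory-provider
         * backed (unreadable) netmem the pool skips the sync, since
         * the driver never set up that DMA mapping itself.
         */
        page_pool_dma_sync_netmem_for_cpu(pool, netmem, pg_off, truesize);
}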
@@ -997,9 +997,8 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
                   FBNIC_BD_FRAG_SIZE;
 
         /* Sync DMA buffer */
-        dma_sync_single_range_for_cpu(nv->dev,
-                                      page_pool_get_dma_addr_netmem(netmem),
-                                      pg_off, truesize, DMA_BIDIRECTIONAL);
+        page_pool_dma_sync_netmem_for_cpu(qt->sub1.page_pool, netmem,
+                                          pg_off, truesize);
 
         added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
         if (unlikely(!added)) {
@@ -1515,16 +1514,14 @@ void fbnic_free_napi_vectors(struct fbnic_net *fbn)
                         fbnic_free_napi_vector(fbn, fbn->napi[i]);
 }
 
-#define FBNIC_PAGE_POOL_FLAGS \
-        (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
-
 static int
 fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv,
-                          struct fbnic_q_triad *qt)
+                          struct fbnic_q_triad *qt, unsigned int rxq_idx)
 {
         struct page_pool_params pp_params = {
                 .order = 0,
-                .flags = FBNIC_PAGE_POOL_FLAGS,
+                .flags = PP_FLAG_DMA_MAP |
+                         PP_FLAG_DMA_SYNC_DEV,
                 .pool_size = fbn->hpq_size + fbn->ppq_size,
                 .nid = NUMA_NO_NODE,
                 .dev = nv->dev,
@@ -1533,6 +1530,7 @@ fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv,
                 .max_len = PAGE_SIZE,
                 .napi = &nv->napi,
                 .netdev = fbn->netdev,
+                .queue_idx = rxq_idx,
         };
         struct page_pool *pp;
 
@@ -1553,10 +1551,23 @@ fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv,
                 return PTR_ERR(pp);
 
         qt->sub0.page_pool = pp;
-        page_pool_get(pp);
+        if (netif_rxq_has_unreadable_mp(fbn->netdev, rxq_idx)) {
+                pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+                pp_params.dma_dir = DMA_FROM_DEVICE;
+
+                pp = page_pool_create(&pp_params);
+                if (IS_ERR(pp))
+                        goto err_destroy_sub0;
+        } else {
+                page_pool_get(pp);
+        }
         qt->sub1.page_pool = pp;
 
         return 0;
+
+err_destroy_sub0:
+        page_pool_destroy(qt->sub0.page_pool);
+        return PTR_ERR(pp);
 }
 
 static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
@@ -1961,7 +1972,7 @@ static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
         struct device *dev = fbn->netdev->dev.parent;
         int err;
 
-        err = fbnic_alloc_qt_page_pools(fbn, nv, qt);
+        err = fbnic_alloc_qt_page_pools(fbn, nv, qt, qt->cmpl.q_idx);
         if (err)
                 return err;
 
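A closing note, not part of the commit: the payload pool can be marked
"unreadable" because memory-provider backed netmem has no kernel
mapping, which is also why the commit message rules out XDP for it.
A minimal sketch under that assumption; example_netmem_cpu_accessible()
is hypothetical, netmem_address() is the real helper.

#include <net/netmem.h>

/* Hypothetical helper, for illustration only. */
static bool example_netmem_cpu_accessible(netmem_ref netmem)
{
        /* netmem_address() returns NULL for net_iov backed netmem
         * (e.g. memory from a dmabuf provider), so such buffers can
         * carry payloads but can never be parsed by the CPU or be
         * handed to an XDP program.
         */
        return netmem_address(netmem) != NULL;
}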