net: airoha: Compute number of descriptors according to reserved memory size

In order not to exceed the reserved memory size for hwfd buffers,
compute the number of hwfd buffers/descriptors according to the
reserved memory size and the size of each hwfd buffer (2KB).
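
For reference, a minimal standalone sketch of the sizing rule described
above; hwfd_num_desc() and HWFD_BUF_SIZE are hypothetical names used only
for illustration, not part of the driver:

	#include <stdint.h>

	/* 2KB per hwfd payload buffer, as stated above */
	#define HWFD_BUF_SIZE 2048ULL

	/* Descriptor count for a reserved region of rmem_size bytes; fall
	 * back to the driver default (HW_DSCP_NUM in the patch) when no
	 * reserved region is configured.
	 */
	static uint64_t hwfd_num_desc(uint64_t rmem_size, uint64_t default_desc)
	{
		return rmem_size ? rmem_size / HWFD_BUF_SIZE : default_desc;
	}

For example, a 512KB reserved region yields 512 * 1024 / 2048 = 256
descriptors, while without a reserved region the default count is kept.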

Fixes: 3a1ce9e3d0 ("net: airoha: Add the capability to allocate hwfd buffers via reserved-memory")
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20250619-airoha-hw-num-desc-v4-1-49600a9b319a@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -1065,19 +1065,13 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
 
 static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 {
+	int size, index, num_desc = HW_DSCP_NUM;
 	struct airoha_eth *eth = qdma->eth;
 	int id = qdma - &eth->qdma[0];
 	dma_addr_t dma_addr;
 	const char *name;
-	int size, index;
 	u32 status;
 
-	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-		return -ENOMEM;
-
-	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-
 	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
 	if (!name)
 		return -ENOMEM;
@@ -1099,8 +1093,12 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 		rmem = of_reserved_mem_lookup(np);
 		of_node_put(np);
 		dma_addr = rmem->base;
+		/* Compute the number of hw descriptors according to the
+		 * reserved memory size and the payload buffer size
+		 */
+		num_desc = rmem->size / AIROHA_MAX_PACKET_SIZE;
 	} else {
-		size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+		size = AIROHA_MAX_PACKET_SIZE * num_desc;
 		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
 					 GFP_KERNEL))
 			return -ENOMEM;
@@ -1108,6 +1106,11 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 
 	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
 
+	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+		return -ENOMEM;
+
+	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
 	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
 			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
 			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
@@ -1116,7 +1119,7 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
 			HW_FWD_DESC_NUM_MASK,
-			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
 
 	return read_poll_timeout(airoha_qdma_rr, status,