Merge branch 'gve-xdp-fixes'
Joshua Washington says:

====================
gve: various XDP fixes

This patch series contains the following XDP fixes:

  - clean up XDP tx queue when stopping rings
  - use RCU synchronization to guard existence of XDP queues
  - perform XSK TX as part of RX NAPI to fix busy polling
  - fix XDP allocation issues when non-XDP configurations occur
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/google/gve/gve.h

@@ -1140,6 +1140,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
 			   struct gve_tx_alloc_rings_cfg *cfg);
 void gve_tx_free_rings_gqi(struct gve_priv *priv,
drivers/net/ethernet/google/gve/gve_main.c

@@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
 
 	if (block->rx) {
 		work_done = gve_rx_poll(block, budget);
+
+		/* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
+		 * TX and RX work done.
+		 */
+		if (priv->xdp_prog)
+			work_done = max_t(int, work_done,
+					  gve_xsk_tx_poll(block, budget));
+
 		reschedule |= work_done == budget;
 	}
 
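NAPI's repoll contract is that a handler which consumes its entire budget gets rescheduled. Before this series, XSK TX descriptors were only serviced from the TX-queue NAPI (see the gve_xdp_poll() hunk at the end of this merge), so an application busy polling the RX queue could starve XSK transmission. Folding the XSK TX pass into the RX handler keeps both directions progressing under one shared budget. A minimal sketch of the resulting control flow, using only names from the hunk above:

	int rx_done = gve_rx_poll(block, budget);
	/* XSK TX shares the RX budget; it only runs with an XDP prog attached. */
	int tx_done = priv->xdp_prog ? gve_xsk_tx_poll(block, budget) : 0;

	/* Repoll if either direction exhausted the shared budget. */
	reschedule |= max_t(int, rx_done, tx_done) == budget;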
@@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
 				      struct gve_tx_alloc_rings_cfg *cfg)
 {
+	int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
+
 	cfg->qcfg = &priv->tx_cfg;
 	cfg->raw_addressing = !gve_is_qpl(priv);
 	cfg->ring_size = priv->tx_desc_cnt;
 	cfg->start_idx = 0;
-	cfg->num_rings = gve_num_tx_queues(priv);
+	cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
 	cfg->tx = priv->tx;
 }
 
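The old cfg->num_rings = gve_num_tx_queues(priv) pulled the ring count from live driver state, which can disagree with the configuration being rebuilt while an XDP program is being attached or detached. Deriving the count directly from priv->xdp_prog keeps the config self-consistent. The ring layout this assumes (inferred from the hunks in this series, not spelled out in this one) is:

	/* Assumed TX ring layout when an XDP program is attached:
	 *   tx[0 .. tx_cfg.num_queues)          regular (skb) TX queues
	 *   tx[tx_cfg.num_queues .. num_rings)  one XDP TX queue per RX queue
	 */
	cfg->num_rings = priv->tx_cfg.num_queues +
			 (priv->xdp_prog ? priv->rx_cfg.num_queues : 0);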
@@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		return err;
 
-	/* If XDP prog is not installed, return */
-	if (!priv->xdp_prog)
+	/* If XDP prog is not installed or interface is down, return. */
+	if (!priv->xdp_prog || !netif_running(dev))
 		return 0;
 
 	rx = &priv->rx[qid];
@@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
 	if (qid >= priv->rx_cfg.num_queues)
 		return -EINVAL;
 
-	/* If XDP prog is not installed, unmap DMA and return */
-	if (!priv->xdp_prog)
+	/* If XDP prog is not installed or interface is down, unmap DMA and
+	 * return.
+	 */
+	if (!priv->xdp_prog || !netif_running(dev))
 		goto done;
 
-	tx_qid = gve_xdp_tx_queue_id(priv, qid);
-	if (!netif_running(dev)) {
-		priv->rx[qid].xsk_pool = NULL;
-		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
-		priv->tx[tx_qid].xsk_pool = NULL;
-		goto done;
-	}
-
 	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
 	napi_disable(napi_rx); /* make sure current rx poll is done */
 
+	tx_qid = gve_xdp_tx_queue_id(priv, qid);
 	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
 	napi_disable(napi_tx); /* make sure current tx poll is done */
 
@@ -1711,6 +1716,9 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 	struct gve_priv *priv = netdev_priv(dev);
 	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
 
+	if (!gve_get_napi_enabled(priv))
+		return -ENETDOWN;
+
 	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
 		return -EINVAL;
 
@@ -1837,6 +1845,7 @@ int gve_adjust_queues(struct gve_priv *priv,
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+	int num_xdp_queues;
 	int err;
 
 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1847,6 +1856,10 @@ int gve_adjust_queues(struct gve_priv *priv,
 	rx_alloc_cfg.qcfg = &new_rx_config;
 	tx_alloc_cfg.num_rings = new_tx_config.num_queues;
 
+	/* Add dedicated XDP TX queues if enabled. */
+	num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
+	tx_alloc_cfg.num_rings += num_xdp_queues;
+
 	if (netif_running(priv->dev)) {
 		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
 		return err;
@@ -1899,6 +1912,9 @@ static void gve_turndown(struct gve_priv *priv)
 
 	gve_clear_napi_enabled(priv);
 	gve_clear_report_stats(priv);
+
+	/* Make sure that all traffic is finished processing. */
+	synchronize_net();
 }
 
 static void gve_turnup(struct gve_priv *priv)
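gve_turndown() clears the napi-enabled flag; the new synchronize_net() additionally waits out every datapath entry that may have sampled the flag before it was cleared. Paired with the -ENETDOWN checks this series adds to gve_xsk_wakeup() above and gve_xdp_xmit() below, this forms a conventional flag-plus-RCU guard for the existence of the XDP queues. Sketched, with names taken from the hunks in this merge:

	/* Writer side, device teardown: */
	gve_clear_napi_enabled(priv);	/* new datapath entries bail out */
	synchronize_net();		/* wait for in-flight NAPI/XDP readers */
	/* ...XDP queues can now be torn down safely... */

	/* Reader side, any XDP/XSK entry point: */
	if (!gve_get_napi_enabled(priv))
		return -ENETDOWN;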
drivers/net/ethernet/google/gve/gve_tx.c

@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
 		return;
 
 	gve_remove_napi(priv, ntfy_idx);
-	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+	if (tx->q_num < priv->tx_cfg.num_queues)
+		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+	else
+		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
 	netdev_tx_reset_queue(tx->netdev_txq);
 	gve_tx_remove_from_block(priv, idx);
 }
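This is the "clean up XDP tx queue when stopping rings" fix: descriptors on the dedicated XDP rings reference xdp_frames or XSK buffers rather than skbs, so running them through the skb completion path on teardown would reclaim the wrong thing. The q_num comparison distinguishes the two ring classes under the layout assumed earlier:

	/* q_num below tx_cfg.num_queues => skb ring; otherwise XDP ring. */
	bool is_xdp_ring = tx->q_num >= priv->tx_cfg.num_queues;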
@@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	struct gve_tx_ring *tx;
 	int i, err = 0, qid;
 
-	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
 		return -EINVAL;
 
+	if (!gve_get_napi_enabled(priv))
+		return -ENETDOWN;
+
 	qid = gve_xdp_tx_queue_id(priv,
 				  smp_processor_id() % priv->num_xdp_queues);
 
@@ -975,33 +981,41 @@ out:
 	return sent;
 }
 
+int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
+{
+	struct gve_rx_ring *rx = rx_block->rx;
+	struct gve_priv *priv = rx->gve;
+	struct gve_tx_ring *tx;
+	int sent = 0;
+
+	tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+	if (tx->xsk_pool) {
+		sent = gve_xsk_tx(priv, tx, budget);
+
+		u64_stats_update_begin(&tx->statss);
+		tx->xdp_xsk_sent += sent;
+		u64_stats_update_end(&tx->statss);
+		if (xsk_uses_need_wakeup(tx->xsk_pool))
+			xsk_set_tx_need_wakeup(tx->xsk_pool);
+	}
+
+	return sent;
+}
+
 bool gve_xdp_poll(struct gve_notify_block *block, int budget)
 {
 	struct gve_priv *priv = block->priv;
 	struct gve_tx_ring *tx = block->tx;
 	u32 nic_done;
-	bool repoll;
 	u32 to_do;
 
 	/* Find out how much work there is to be done */
 	nic_done = gve_tx_load_event_counter(priv, tx);
 	to_do = min_t(u32, (nic_done - tx->done), budget);
 	gve_clean_xdp_done(priv, tx, to_do);
-	repoll = nic_done != tx->done;
-
-	if (tx->xsk_pool) {
-		int sent = gve_xsk_tx(priv, tx, budget);
-
-		u64_stats_update_begin(&tx->statss);
-		tx->xdp_xsk_sent += sent;
-		u64_stats_update_end(&tx->statss);
-		repoll |= (sent == budget);
-		if (xsk_uses_need_wakeup(tx->xsk_pool))
-			xsk_set_tx_need_wakeup(tx->xsk_pool);
-	}
 
 	/* If we still have work we want to repoll */
-	return repoll;
+	return nic_done != tx->done;
 }
 
 bool gve_tx_poll(struct gve_notify_block *block, int budget)
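With gve_xsk_tx_poll() factored out, gve_xdp_poll() is reduced to pure descriptor cleanup, and the XSK send pass runs exactly once per NAPI round, driven from the RX side. Returning an int (descriptors sent) rather than folding the result into a bool is what lets gve_napi_poll() take the max against RX work, as in the first gve_main.c hunk:

	work_done = max_t(int, work_done, gve_xsk_tx_poll(block, budget));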