Merge tag 'net-6.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Including fixes from netfilter, Bluetooth and WPAN.
No outstanding fixes / investigations at this time.
Current release - new code bugs:
- eth: fbnic: revert HWMON support, it doesn't work at all and revert
is similar size as the fixes
Previous releases - regressions:
- tcp: allow a connection when sk_max_ack_backlog is zero
- tls: fix tls_sw_sendmsg error handling
Previous releases - always broken:
- netdev netlink family:
- prevent accessing NAPI instances from another namespace
- don't dump Tx and uninitialized NAPIs
- net: sysctl: avoid using current->nsproxy, fix null-deref if task
is exiting and stick to opener's netns
- sched: sch_cake: add bounds checks to host bulk flow fairness
counts
Misc:
- annual cleanup of inactive maintainers"
* tag 'net-6.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (57 commits)
rds: sysctl: rds_tcp_{rcv,snd}buf: avoid using current->nsproxy
sctp: sysctl: plpmtud_probe_interval: avoid using current->nsproxy
sctp: sysctl: udp_port: avoid using current->nsproxy
sctp: sysctl: auth_enable: avoid using current->nsproxy
sctp: sysctl: rto_min/max: avoid using current->nsproxy
sctp: sysctl: cookie_hmac_alg: avoid using current->nsproxy
mptcp: sysctl: blackhole timeout: avoid using current->nsproxy
mptcp: sysctl: sched: avoid using current->nsproxy
mptcp: sysctl: avail sched: remove write access
MAINTAINERS: remove Lars Povlsen from Microchip Sparx5 SoC
MAINTAINERS: remove Noam Dagan from AMAZON ETHERNET
MAINTAINERS: remove Ying Xue from TIPC
MAINTAINERS: remove Mark Lee from MediaTek Ethernet
MAINTAINERS: mark stmmac ethernet as an Orphan
MAINTAINERS: remove Andy Gospodarek from bonding
MAINTAINERS: update maintainers for Microchip LAN78xx
MAINTAINERS: mark Synopsys DW XPCS as Orphan
net/mlx5: Fix variable not being completed when function returns
rtase: Fix a check for error in rtase_alloc_msix()
net: stmmac: dwmac-tegra: Read iommu stream id from device tree
...
 CREDITS | 12
@@ -20,6 +20,10 @@ N: Thomas Abraham
 E: thomas.ab@samsung.com
 D: Samsung pin controller driver
 
+N: Jose Abreu
+E: jose.abreu@synopsys.com
+D: Synopsys DesignWare XPCS MDIO/PCS driver.
+
 N: Dragos Acostachioaie
 E: dragos@iname.com
 W: http://www.arbornet.org/~dragos
@@ -1428,6 +1432,10 @@ S: 8124 Constitution Apt. 7
 S: Sterling Heights, Michigan 48313
 S: USA
 
+N: Andy Gospodarek
+E: andy@greyhouse.net
+D: Maintenance and contributions to the network interface bonding driver.
+
 N: Wolfgang Grandegger
 E: wg@grandegger.com
 D: Controller Area Network (device drivers)
@@ -1812,6 +1820,10 @@ D: Author/maintainer of most DRM drivers (especially ATI, MGA)
 D: Core DRM templates, general DRM and 3D-related hacking
 S: No fixed address
 
+N: Woojung Huh
+E: woojung.huh@microchip.com
+D: Microchip LAN78XX USB Ethernet driver
+
 N: Kenn Humborg
 E: kenn@wombat.ie
 D: Mods to loop device to support sparse backing files
@@ -81,7 +81,7 @@ properties:
       List of phandles, each pointing to the power supply for the
       corresponding pairset named in 'pairset-names'. This property
       aligns with IEEE 802.3-2022, Section 33.2.3 and 145.2.4.
-      PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145\u20133)
+      PSE Pinout Alternatives (as per IEEE 802.3-2022 Table 145-3)
      |-----------|---------------|---------------|---------------|---------------|
      | Conductor | Alternative A | Alternative A | Alternative B | Alternative B |
      |           |    (MDI-X)    |     (MDI)     |      (X)      |      (S)      |
 MAINTAINERS | 16
@@ -949,7 +949,6 @@ AMAZON ETHERNET DRIVERS
 M: Shay Agroskin <shayagr@amazon.com>
 M: Arthur Kiyanovski <akiyano@amazon.com>
 R: David Arinzon <darinzon@amazon.com>
-R: Noam Dagan <ndagan@amazon.com>
 R: Saeed Bishara <saeedb@amazon.com>
 L: netdev@vger.kernel.org
 S: Supported
@@ -2690,7 +2689,6 @@ N: at91
 N: atmel
 
 ARM/Microchip Sparx5 SoC support
-M: Lars Povlsen <lars.povlsen@microchip.com>
 M: Steen Hegelund <Steen.Hegelund@microchip.com>
 M: Daniel Machon <daniel.machon@microchip.com>
 M: UNGLinuxDriver@microchip.com
@@ -4058,7 +4056,6 @@ F: net/bluetooth/
 
 BONDING DRIVER
 M: Jay Vosburgh <jv@jvosburgh.net>
-M: Andy Gospodarek <andy@greyhouse.net>
 L: netdev@vger.kernel.org
 S: Maintained
 F: Documentation/networking/bonding.rst
@@ -14566,7 +14563,6 @@ F: drivers/dma/mediatek/
 MEDIATEK ETHERNET DRIVER
 M: Felix Fietkau <nbd@nbd.name>
 M: Sean Wang <sean.wang@mediatek.com>
-M: Mark Lee <Mark-MC.Lee@mediatek.com>
 M: Lorenzo Bianconi <lorenzo@kernel.org>
 L: netdev@vger.kernel.org
 S: Maintained
@@ -22503,11 +22499,8 @@ F: Documentation/devicetree/bindings/phy/st,stm32mp25-combophy.yaml
 F: drivers/phy/st/phy-stm32-combophy.c
 
 STMMAC ETHERNET DRIVER
-M: Alexandre Torgue <alexandre.torgue@foss.st.com>
-M: Jose Abreu <joabreu@synopsys.com>
 L: netdev@vger.kernel.org
-S: Supported
-W: http://www.stlinux.com
+S: Orphan
 F: Documentation/networking/device_drivers/ethernet/stmicro/
 F: drivers/net/ethernet/stmicro/stmmac/
 
@@ -22739,9 +22732,8 @@ S: Supported
 F: drivers/net/ethernet/synopsys/
 
 SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
-M: Jose Abreu <Jose.Abreu@synopsys.com>
 L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
 F: drivers/net/pcs/pcs-xpcs.c
 F: drivers/net/pcs/pcs-xpcs.h
 F: include/linux/pcs/pcs-xpcs.h
@@ -23649,7 +23641,6 @@ F: tools/testing/selftests/timers/
 
 TIPC NETWORK LAYER
 M: Jon Maloy <jmaloy@redhat.com>
-M: Ying Xue <ying.xue@windriver.com>
 L: netdev@vger.kernel.org (core kernel code)
 L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
 S: Maintained
@@ -24255,7 +24246,8 @@ F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
 F: drivers/usb/isp1760/*
 
 USB LAN78XX ETHERNET DRIVER
-M: Woojung Huh <woojung.huh@microchip.com>
+M: Thangaraj Samynathan <Thangaraj.S@microchip.com>
+M: Rengarajan Sundararajan <Rengarajan.S@microchip.com>
 M: UNGLinuxDriver@microchip.com
 L: netdev@vger.kernel.org
 S: Maintained
@@ -1472,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
 
 int btmtk_usb_shutdown(struct hci_dev *hdev)
 {
+	struct btmtk_data *data = hci_get_priv(hdev);
 	struct btmtk_hci_wmt_params wmt_params;
 	u8 param = 0;
 	int err;
 
+	err = usb_autopm_get_interface(data->intf);
+	if (err < 0)
+		return err;
+
 	/* Disable the device */
 	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
 	wmt_params.flag = 0;
@@ -1486,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
 	err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
 	if (err < 0) {
 		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+		usb_autopm_put_interface(data->intf);
 		return err;
 	}
 
+	usb_autopm_put_interface(data->intf);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
@@ -1381,6 +1381,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
 
 	while ((skb = nxp_dequeue(nxpdev))) {
 		len = serdev_device_write_buf(serdev, skb->data, skb->len);
+		serdev_device_wait_until_sent(serdev, 0);
 		hdev->stat.byte_tx += len;
 
 		skb_pull(skb, len);
@@ -118,7 +118,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
 	if (err && err != -EIO)
 		return err;
 
-	listlen = fw_list.num_fw_slots;
+	listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
 	for (i = 0; i < listlen; i++) {
 		if (i < ARRAY_SIZE(fw_slotnames))
 			strscpy(buf, fw_slotnames[i], sizeof(buf));
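The hunk above clamps a firmware-reported slot count before using it as a loop bound over a fixed-size array. A minimal standalone C sketch of that pattern (names are illustrative, not the pds_core code):

	#include <stdio.h>
	#include <stddef.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))
	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct fw_list {
		unsigned int num_fw_slots;	/* device-controlled, may be bogus */
		char fw_names[4][16];
	};

	int main(void)
	{
		struct fw_list fw = { .num_fw_slots = 100 };	/* lies about its size */
		/* trusting the count would read past fw_names[]; clamp it instead */
		size_t listlen = MIN((size_t)fw.num_fw_slots, ARRAY_SIZE(fw.fw_names));

		for (size_t i = 0; i < listlen; i++)
			printf("slot %zu: %s\n", i, fw.fw_names[i]);
		return 0;
	}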
@@ -2897,6 +2897,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 	return 0;
 }
 
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
 {
 	struct bnxt_napi *bnapi = dev_instance;
@@ -3164,7 +3171,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 			break;
 		}
 	}
-	if (bp->flags & BNXT_FLAG_DIM) {
+	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
 		struct dim_sample dim_sample = {};
 
 		dim_update_sample(cpr->event_ctr,
@@ -3295,7 +3302,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
 poll_done:
 	cpr_rx = &cpr->cp_ring_arr[0];
 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
-	    (bp->flags & BNXT_FLAG_DIM)) {
+	    (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
 		struct dim_sample dim_sample = {};
 
 		dim_update_sample(cpr->event_ctr,
@@ -7266,6 +7273,26 @@ err_out:
 	return rc;
 }
 
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+	int i;
+
+	/* DIM work is initialized in bnxt_enable_napi().  Proceed only
+	 * if NAPI is enabled.
+	 */
+	if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+		return;
+
+	/* Make sure NAPI sees that the VNIC is disabled */
+	synchronize_net();
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+		struct bnxt_napi *bnapi = rxr->bnapi;
+
+		cancel_work_sync(&bnapi->cp_ring.dim.work);
+	}
+}
+
 static int hwrm_ring_free_send_msg(struct bnxt *bp,
 				   struct bnxt_ring_struct *ring,
 				   u32 ring_type, int cmpl_ring_id)
@@ -7366,6 +7393,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		}
 	}
 
+	bnxt_cancel_dim(bp);
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
 		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -11309,8 +11337,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
 		if (bnapi->in_reset)
 			cpr->sw_stats->rx.rx_resets++;
 		napi_disable(&bnapi->napi);
-		if (bnapi->rx_ring)
-			cancel_work_sync(&cpr->dim.work);
 	}
 }
@@ -15572,8 +15598,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 		bnxt_hwrm_vnic_update(bp, vnic,
 				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
 	}
 
+	/* Make sure NAPI sees that the VNIC is disabled */
+	synchronize_net();
 	rxr = &bp->rx_ring[idx];
 	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	rxr->rx_next_cons = 0;
@@ -208,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
 
 	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
 	if (rc)
-		return rc;
+		goto drop_req;
 
 	hwrm_req_timeout(bp, req, fw_msg->timeout);
 	resp = hwrm_req_hold(bp, req);
@@ -220,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
 
 		memcpy(fw_msg->resp, resp, resp_len);
 	}
+drop_req:
 	hwrm_req_drop(bp, req);
 	return rc;
 }
@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
 	struct adapter *adap = container_of(t, struct adapter, tids);
 	struct sk_buff *skb;
 
-	WARN_ON(tid_out_of_range(&adap->tids, tid));
+	if (tid_out_of_range(&adap->tids, tid)) {
+		dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+		return;
+	}
 
 	if (t->tid_tab[tid - adap->tids.tid_base]) {
 		t->tid_tab[tid - adap->tids.tid_base] = NULL;
@@ -2241,14 +2241,18 @@ static void gve_service_task(struct work_struct *work)
 
 static void gve_set_netdev_xdp_features(struct gve_priv *priv)
 {
+	xdp_features_t xdp_features;
+
 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
-		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
-		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
-		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
-		priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+		xdp_features = NETDEV_XDP_ACT_BASIC;
+		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+		xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
 	} else {
-		priv->dev->xdp_features = 0;
+		xdp_features = 0;
 	}
+
+	xdp_set_features_flag(priv->dev, xdp_features);
 }
 
 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
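The gve hunk above replaces piecewise writes to priv->dev->xdp_features with a single xdp_set_features_flag() call, which (as I understand the core API) also fires the feature-change notifier. A minimal userspace sketch of the underlying pattern — build the value locally, publish it once so observers never see a half-built set (names illustrative, not driver code):

	#include <stdatomic.h>
	#include <stdio.h>

	#define F_BASIC    (1u << 0)
	#define F_REDIRECT (1u << 1)
	#define F_NDO_XMIT (1u << 2)

	static _Atomic unsigned int published_features;

	static void set_features(int full)
	{
		unsigned int feats = 0;	/* accumulate locally... */

		if (full) {
			feats = F_BASIC;
			feats |= F_REDIRECT;
			feats |= F_NDO_XMIT;
		}

		/* ...then publish once: readers never observe an intermediate set */
		atomic_store(&published_features, feats);
	}

	int main(void)
	{
		set_features(1);
		printf("features: 0x%x\n", atomic_load(&published_features));
		return 0;
	}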
@@ -916,9 +916,6 @@ struct hnae3_handle {
 
 	u8 netdev_flags;
 	struct dentry *hnae3_dbgfs;
-	/* protects concurrent contention between debugfs commands */
-	struct mutex dbgfs_lock;
-	char **dbgfs_buf;
 
 	/* Network interface message level enabled bits */
 	u32 msg_enable;
@@ -1260,69 +1260,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
 static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
 			     size_t count, loff_t *ppos)
 {
-	struct hns3_dbg_data *dbg_data = filp->private_data;
+	char *buf = filp->private_data;
+
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static int hns3_dbg_open(struct inode *inode, struct file *filp)
+{
+	struct hns3_dbg_data *dbg_data = inode->i_private;
 	struct hnae3_handle *handle = dbg_data->handle;
 	struct hns3_nic_priv *priv = handle->priv;
-	ssize_t size = 0;
-	char **save_buf;
-	char *read_buf;
 	u32 index;
+	char *buf;
 	int ret;
 
+	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+		return -EBUSY;
+
 	ret = hns3_dbg_get_cmd_index(dbg_data, &index);
 	if (ret)
 		return ret;
 
-	mutex_lock(&handle->dbgfs_lock);
-	save_buf = &handle->dbgfs_buf[index];
+	buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
-	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
-		ret = -EBUSY;
-		goto out;
+	ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+				buf, hns3_dbg_cmd[index].buf_len);
+	if (ret) {
+		kvfree(buf);
+		return ret;
 	}
 
-	if (*save_buf) {
-		read_buf = *save_buf;
-	} else {
-		read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
-		if (!read_buf) {
-			ret = -ENOMEM;
-			goto out;
-		}
+	filp->private_data = buf;
+	return 0;
+}
 
-		/* save the buffer addr until the last read operation */
-		*save_buf = read_buf;
-
-		/* get data ready for the first time to read */
-		ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
-					read_buf, hns3_dbg_cmd[index].buf_len);
-		if (ret)
-			goto out;
-	}
-
-	size = simple_read_from_buffer(buffer, count, ppos, read_buf,
-				       strlen(read_buf));
-	if (size > 0) {
-		mutex_unlock(&handle->dbgfs_lock);
-		return size;
-	}
-
-out:
-	/* free the buffer for the last read operation */
-	if (*save_buf) {
-		kvfree(*save_buf);
-		*save_buf = NULL;
-	}
-
-	mutex_unlock(&handle->dbgfs_lock);
-	return ret;
+static int hns3_dbg_release(struct inode *inode, struct file *filp)
+{
+	kvfree(filp->private_data);
+	filp->private_data = NULL;
+	return 0;
 }
 
 static const struct file_operations hns3_dbg_fops = {
 	.owner = THIS_MODULE,
-	.open  = simple_open,
+	.open  = hns3_dbg_open,
 	.read  = hns3_dbg_read,
+	.release = hns3_dbg_release,
 };
 
 static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
@@ -1379,13 +1365,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 	int ret;
 	u32 i;
 
-	handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
-					 ARRAY_SIZE(hns3_dbg_cmd),
-					 sizeof(*handle->dbgfs_buf),
-					 GFP_KERNEL);
-	if (!handle->dbgfs_buf)
-		return -ENOMEM;
-
 	hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
 		debugfs_create_dir(name, hns3_dbgfs_root);
 	handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1395,8 +1374,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 		debugfs_create_dir(hns3_dbg_dentry[i].name,
 				   handle->hnae3_dbgfs);
 
-	mutex_init(&handle->dbgfs_lock);
-
 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
 		if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
 		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1425,24 +1402,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 out:
 	debugfs_remove_recursive(handle->hnae3_dbgfs);
 	handle->hnae3_dbgfs = NULL;
-	mutex_destroy(&handle->dbgfs_lock);
 	return ret;
 }
 
 void hns3_dbg_uninit(struct hnae3_handle *handle)
 {
-	u32 i;
-
 	debugfs_remove_recursive(handle->hnae3_dbgfs);
 	handle->hnae3_dbgfs = NULL;
-
-	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
-		if (handle->dbgfs_buf[i]) {
-			kvfree(handle->dbgfs_buf[i]);
-			handle->dbgfs_buf[i] = NULL;
-		}
-
-	mutex_destroy(&handle->dbgfs_lock);
 }
 
 void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
@@ -2452,7 +2452,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
 			return ret;
 	}
 
-	netdev->features = features;
 	return 0;
 }
@@ -6,6 +6,7 @@
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -3574,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
 	return ret;
 }
 
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+				    enum hnae3_reset_type reset_type)
+{
+	/* When an incorrect reset type is executed, the get_reset_level
+	 * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type do not need to pending.
+	 */
+	if (reset_type != HNAE3_NONE_RESET)
+		set_bit(reset_type, &hdev->reset_pending);
+}
+
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3594,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	 */
 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
-		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+		hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
 		hdev->rst_stats.imp_rst_cnt++;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
-		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+		hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
 		hdev->rst_stats.global_rst_cnt++;
 		return HCLGE_VECTOR0_EVENT_RST;
@@ -3759,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
 		 HCLGE_NAME, pci_name(hdev->pdev));
 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
-			  0, hdev->misc_vector.name, hdev);
+			  IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
 	if (ret) {
 		hclge_free_vector(hdev, 0);
 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4052,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF reset requested\n");
 		/* schedule again to check later */
-		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+		hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
 		hclge_reset_task_schedule(hdev);
 		break;
 	default:
@@ -4086,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
 		clear_bit(HNAE3_FLR_RESET, addr);
 	}
 
+	clear_bit(HNAE3_NONE_RESET, addr);
+
 	if (hdev->reset_type != HNAE3_NONE_RESET &&
 	    rst_level < hdev->reset_type)
 		return HNAE3_NONE_RESET;
@@ -4227,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
 		return false;
 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
 		hdev->rst_stats.reset_fail_cnt++;
-		set_bit(hdev->reset_type, &hdev->reset_pending);
+		hclge_set_reset_pending(hdev, hdev->reset_type);
 		dev_info(&hdev->pdev->dev,
 			 "re-schedule reset task(%u)\n",
 			 hdev->rst_stats.reset_fail_cnt);
@@ -4470,8 +4484,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
 					enum hnae3_reset_type rst_type)
 {
+#define HCLGE_SUPPORT_RESET_TYPE \
+	(BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+	 BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
 	struct hclge_dev *hdev = ae_dev->priv;
 
+	if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+		/* To prevent reset triggered by hclge_reset_event */
+		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+		dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+			 rst_type);
+		return;
+	}
+
 	set_bit(rst_type, &hdev->default_reset_request);
 }
@@ -11881,9 +11907,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 	hclge_init_rxd_adv_layout(hdev);
 
-	/* Enable MISC vector(vector0) */
-	hclge_enable_vector(&hdev->misc_vector, true);
-
 	ret = hclge_init_wol(hdev);
 	if (ret)
 		dev_warn(&pdev->dev,
@@ -11896,6 +11919,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_state_init(hdev);
 	hdev->last_reset_time = jiffies;
 
+	/* Enable MISC vector(vector0) */
+	enable_irq(hdev->misc_vector.vector_irq);
+	hclge_enable_vector(&hdev->misc_vector, true);
+
 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
 		 HCLGE_DRIVER_NAME);
@@ -12301,7 +12328,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 	/* Disable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, false);
-	synchronize_irq(hdev->misc_vector.vector_irq);
+	disable_irq(hdev->misc_vector.vector_irq);
 
 	/* Disable all hw interrupts */
 	hclge_config_mac_tnl_int(hdev, false);
@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_ptp *ptp = hdev->ptp;
 
+	if (!ptp)
+		return false;
+
 	if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
 	    test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
 		ptp->tx_skipped++;
@@ -510,9 +510,9 @@ out:
 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
 			      struct hnae3_knic_private_info *kinfo)
 {
-#define HCLGE_RING_REG_OFFSET		0x200
 #define HCLGE_RING_INT_REG_OFFSET	0x4
 
+	struct hnae3_queue *tqp;
 	int i, j, reg_num;
 	int data_num_sum;
 	u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
 	for (j = 0; j < kinfo->num_tqps; j++) {
 		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+		tqp = kinfo->tqp[j];
 		for (i = 0; i < reg_num; i++)
-			*reg++ = hclge_read_dev(&hdev->hw,
-						ring_reg_addr_list[i] +
-						HCLGE_RING_REG_OFFSET * j);
+			*reg++ = readl_relaxed(tqp->io_base -
+					       HCLGE_TQP_REG_OFFSET +
+					       ring_reg_addr_list[i]);
 	}
 	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
@@ -1393,6 +1393,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
 	return ret;
 }
 
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+				      enum hnae3_reset_type reset_type)
+{
+	/* When an incorrect reset type is executed, the get_reset_level
+	 * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type do not need to pending.
+	 */
+	if (reset_type != HNAE3_NONE_RESET)
+		set_bit(reset_type, &hdev->reset_pending);
+}
+
 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_RESET_WAIT_US	20000
@@ -1542,7 +1553,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
 		 hdev->rst_stats.rst_fail_cnt);
 
 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
-		set_bit(hdev->reset_type, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, hdev->reset_type);
 
 	if (hclgevf_is_reset_pending(hdev)) {
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1662,6 +1673,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
 		clear_bit(HNAE3_FLR_RESET, addr);
 	}
 
+	clear_bit(HNAE3_NONE_RESET, addr);
+
 	return rst_level;
 }
@@ -1671,14 +1684,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
 	struct hclgevf_dev *hdev = ae_dev->priv;
 
-	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
 	if (hdev->default_reset_request)
 		hdev->reset_level =
 			hclgevf_get_reset_level(&hdev->default_reset_request);
 	else
 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
 
+	dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+		 hdev->reset_level);
+
 	/* reset of this VF requested */
 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
 	hclgevf_reset_task_schedule(hdev);
@@ -1689,8 +1703,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
 					  enum hnae3_reset_type rst_type)
 {
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+	(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+	 BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+	 BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
 	struct hclgevf_dev *hdev = ae_dev->priv;
 
+	if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+		/* To prevent reset triggered by hclge_reset_event */
+		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+		dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+			 rst_type);
+		return;
+	}
+
 	set_bit(rst_type, &hdev->default_reset_request);
 }
@@ -1847,14 +1873,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 	 */
 	if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
 		/* prepare for full reset of stack + pcie interface */
-		set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
 
 		/* "defer" schedule the reset task again */
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 	} else {
 		hdev->reset_attempts++;
 
-		set_bit(hdev->reset_level, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, hdev->reset_level);
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 	}
 	hclgevf_reset_task_schedule(hdev);
@@ -1977,7 +2003,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
 		dev_info(&hdev->pdev->dev,
 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
-		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+		hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2287,6 +2313,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+	/* timer needs to be initialized before misc irq */
+	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
 
 	mutex_init(&hdev->mbx_resp.mbx_mutex);
 	sema_init(&hdev->reset_sem, 1);
@@ -2986,7 +3014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		 HCLGEVF_DRIVER_NAME);
 
 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
-	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
 
 	return 0;
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
 void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 		      void *data)
 {
-#define HCLGEVF_RING_REG_OFFSET		0x200
 #define HCLGEVF_RING_INT_REG_OFFSET	0x4
 
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_queue *tqp;
 	int i, j, reg_um;
 	u32 *reg = data;
 
@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 	reg_um = ARRAY_SIZE(ring_reg_addr_list);
 	for (j = 0; j < hdev->num_tqps; j++) {
 		reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+		tqp = &hdev->htqp[j].q;
 		for (i = 0; i < reg_um; i++)
-			*reg++ = hclgevf_read_dev(&hdev->hw,
-						  ring_reg_addr_list[i] +
-						  HCLGEVF_RING_REG_OFFSET * j);
+			*reg++ = readl_relaxed(tqp->io_base -
+					       HCLGEVF_TQP_REG_OFFSET +
+					       ring_reg_addr_list[i]);
 	}
 
 	reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
@@ -2264,6 +2264,8 @@ struct ice_aqc_get_pkg_info_resp {
 	struct ice_aqc_get_pkg_info pkg_info[];
 };
 
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ	GENMASK(30, 0)
+
 /* Get CGU abilities command response data structure (indirect 0x0C61) */
 struct ice_aqc_get_cgu_abilities {
 	u8 num_inputs;
@@ -2064,6 +2064,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
 	return 0;
 }
 
+/**
+ * ice_dpll_phase_range_set - initialize phase adjust range helper
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: a value to be used as min(-)/max(+) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+				     u32 phase_adj)
+{
+	range->min = -phase_adj;
+	range->max = phase_adj;
+}
+
 /**
  * ice_dpll_init_info_pins_generic - initializes generic pins info
  * @pf: board private structure
@@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
 	for (i = 0; i < pin_num; i++) {
 		pins[i].idx = i;
 		pins[i].prop.board_label = labels[i];
-		pins[i].prop.phase_range.min = phase_adj_max;
-		pins[i].prop.phase_range.max = -phase_adj_max;
+		ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+					 phase_adj_max);
 		pins[i].prop.capabilities = cap;
 		pins[i].pf = pf;
 		ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
 	struct ice_hw *hw = &pf->hw;
 	struct ice_dpll_pin *pins;
 	unsigned long caps;
+	u32 phase_adj_max;
 	u8 freq_supp_num;
 	bool input;
 
@@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
 	case ICE_DPLL_PIN_TYPE_INPUT:
 		pins = pf->dplls.inputs;
 		num_pins = pf->dplls.num_inputs;
+		phase_adj_max = pf->dplls.input_phase_adj_max;
 		input = true;
 		break;
 	case ICE_DPLL_PIN_TYPE_OUTPUT:
 		pins = pf->dplls.outputs;
 		num_pins = pf->dplls.num_outputs;
+		phase_adj_max = pf->dplls.output_phase_adj_max;
 		input = false;
 		break;
 	default:
@@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
 				return ret;
 			caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
 				 DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
-			pins[i].prop.phase_range.min =
-				pf->dplls.input_phase_adj_max;
-			pins[i].prop.phase_range.max =
-				-pf->dplls.input_phase_adj_max;
 		} else {
-			pins[i].prop.phase_range.min =
-				pf->dplls.output_phase_adj_max;
-			pins[i].prop.phase_range.max =
-				-pf->dplls.output_phase_adj_max;
 			ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
 			if (ret)
 				return ret;
 		}
+		ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+					 phase_adj_max);
 		pins[i].prop.capabilities = caps;
 		ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
 		if (ret)
@@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
 	dp->dpll_idx = abilities.pps_dpll_idx;
 	d->num_inputs = abilities.num_inputs;
 	d->num_outputs = abilities.num_outputs;
-	d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
-	d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+	d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+				 ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+	d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+				  ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
 
 	alloc_size = sizeof(*d->inputs) * d->num_inputs;
 	d->inputs = kzalloc(alloc_size, GFP_KERNEL);
@@ -761,9 +761,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
 	/* rx_desk_rsgb_par */
 	644531250, /* 644.53125 MHz Reed Solomon gearbox */
 	/* tx_desk_rsgb_pcs */
-	644531250, /* 644.53125 MHz Reed Solomon gearbox */
+	390625000, /* 390.625 MHz Reed Solomon gearbox */
 	/* rx_desk_rsgb_pcs */
-	644531250, /* 644.53125 MHz Reed Solomon gearbox */
+	390625000, /* 390.625 MHz Reed Solomon gearbox */
 	/* tx_fixed_delay */
 	1620,
 	/* pmd_adj_divisor */
@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
 	u32 eecd = rd32(IGC_EECD);
 	u16 size;
 
+	/* failed to read reg and got all F's */
+	if (!(~eecd))
+		return -ENXIO;
+
 	size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
 
 	/* Added to a constant, "size" becomes the left-shift value
@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
 
 	/* NVM initialization */
 	ret_val = igc_init_nvm_params_base(hw);
+	if (ret_val)
+		goto out;
 	switch (hw->mac.type) {
 	case igc_i225:
 		ret_val = igc_init_nvm_params_i225(hw);
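The igc check above treats a register that reads back as all F's as a dead or surprise-removed PCI device. A standalone C sketch of why `!(~val)` detects exactly that case (illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* A dead/removed PCI device typically reads back as 0xFFFFFFFF. */
	static int reg_is_dead(uint32_t val)
	{
		return !(~val);	/* ~0xFFFFFFFF == 0, true only for all F's */
	}

	int main(void)
	{
		printf("%d %d\n", reg_is_dead(0xFFFFFFFF), reg_is_dead(0x12345678));
		return 0;	/* prints: 1 0 */
	}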
@@ -1013,6 +1013,7 @@ static void cmd_work_handler(struct work_struct *work)
 			complete(&ent->done);
 		}
 		up(&cmd->vars.sem);
+		complete(&ent->slotted);
 		return;
 	}
 } else {
@@ -13,7 +13,6 @@ fbnic-y := fbnic_csr.o \
 	   fbnic_ethtool.o \
 	   fbnic_fw.o \
 	   fbnic_hw_stats.o \
-	   fbnic_hwmon.o \
 	   fbnic_irq.o \
 	   fbnic_mac.o \
 	   fbnic_netdev.o \
@@ -20,7 +20,6 @@ struct fbnic_dev {
 	struct device *dev;
 	struct net_device *netdev;
 	struct dentry *dbg_fbd;
-	struct device *hwmon;
 
 	u32 __iomem *uc_addr0;
 	u32 __iomem *uc_addr4;
@@ -33,7 +32,6 @@ struct fbnic_dev {
 
 	struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
 	struct fbnic_fw_cap fw_cap;
-	struct fbnic_fw_completion *cmpl_data;
 	/* Lock protecting Tx Mailbox queue to prevent possible races */
 	spinlock_t fw_tx_lock;
@@ -142,9 +140,6 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
 int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
 void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
 
-void fbnic_hwmon_register(struct fbnic_dev *fbd);
-void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-
 int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
 void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
@@ -44,13 +44,6 @@ struct fbnic_fw_cap {
 	u8	link_fec;
 };
 
-struct fbnic_fw_completion {
-	struct {
-		s32 millivolts;
-		s32 millidegrees;
-	} tsene;
-};
-
 void fbnic_mbx_init(struct fbnic_dev *fbd);
 void fbnic_mbx_clean(struct fbnic_dev *fbd);
 void fbnic_mbx_poll(struct fbnic_dev *fbd);
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) Meta Platforms, Inc. and affiliates. */
-
-#include <linux/hwmon.h>
-
-#include "fbnic.h"
-#include "fbnic_mac.h"
-
-static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
-{
-	if (type == hwmon_temp)
-		return FBNIC_SENSOR_TEMP;
-	if (type == hwmon_in)
-		return FBNIC_SENSOR_VOLTAGE;
-
-	return -EOPNOTSUPP;
-}
-
-static umode_t fbnic_hwmon_is_visible(const void *drvdata,
-				      enum hwmon_sensor_types type,
-				      u32 attr, int channel)
-{
-	if (type == hwmon_temp && attr == hwmon_temp_input)
-		return 0444;
-	if (type == hwmon_in && attr == hwmon_in_input)
-		return 0444;
-
-	return 0;
-}
-
-static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
-			    u32 attr, int channel, long *val)
-{
-	struct fbnic_dev *fbd = dev_get_drvdata(dev);
-	const struct fbnic_mac *mac = fbd->mac;
-	int id;
-
-	id = fbnic_hwmon_sensor_id(type);
-	return id < 0 ? id : mac->get_sensor(fbd, id, val);
-}
-
-static const struct hwmon_ops fbnic_hwmon_ops = {
-	.is_visible = fbnic_hwmon_is_visible,
-	.read = fbnic_hwmon_read,
-};
-
-static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
-	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
-	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
-	NULL
-};
-
-static const struct hwmon_chip_info fbnic_chip_info = {
-	.ops = &fbnic_hwmon_ops,
-	.info = fbnic_hwmon_info,
-};
-
-void fbnic_hwmon_register(struct fbnic_dev *fbd)
-{
-	if (!IS_REACHABLE(CONFIG_HWMON))
-		return;
-
-	fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
-						     fbd, &fbnic_chip_info,
-						     NULL);
-	if (IS_ERR(fbd->hwmon)) {
-		dev_notice(fbd->dev,
-			   "Failed to register hwmon device %pe\n",
-			   fbd->hwmon);
-		fbd->hwmon = NULL;
-	}
-}
-
-void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
-{
-	if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
-		return;
-
-	hwmon_device_unregister(fbd->hwmon);
-	fbd->hwmon = NULL;
-}
@@ -686,27 +686,6 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
 			  MAC_STAT_TX_BROADCAST);
 }
 
-static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val)
-{
-	struct fbnic_fw_completion fw_cmpl;
-	s32 *sensor;
-
-	switch (id) {
-	case FBNIC_SENSOR_TEMP:
-		sensor = &fw_cmpl.tsene.millidegrees;
-		break;
-	case FBNIC_SENSOR_VOLTAGE:
-		sensor = &fw_cmpl.tsene.millivolts;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	*val = *sensor;
-
-	return 0;
-}
-
 static const struct fbnic_mac fbnic_mac_asic = {
 	.init_regs = fbnic_mac_init_regs,
 	.pcs_enable = fbnic_pcs_enable_asic,
@@ -716,7 +695,6 @@ static const struct fbnic_mac fbnic_mac_asic = {
 	.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
 	.link_down = fbnic_mac_link_down_asic,
 	.link_up = fbnic_mac_link_up_asic,
-	.get_sensor = fbnic_mac_get_sensor_asic,
 };
 
 /**
@@ -47,11 +47,6 @@ enum {
 #define FBNIC_LINK_MODE_PAM4	(FBNIC_LINK_50R1)
 #define FBNIC_LINK_MODE_MASK	(FBNIC_LINK_AUTO - 1)
 
-enum fbnic_sensor_id {
-	FBNIC_SENSOR_TEMP,		/* Temp in millidegrees Centigrade */
-	FBNIC_SENSOR_VOLTAGE,		/* Voltage in millivolts */
-};
-
 /* This structure defines the interface hooks for the MAC. The MAC hooks
  * will be configured as a const struct provided with a set of function
  * pointers.
@@ -88,8 +83,6 @@ struct fbnic_mac {
 
 	void (*link_down)(struct fbnic_dev *fbd);
 	void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
-
-	int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
 };
 
 int fbnic_mac_init(struct fbnic_dev *fbd);
@@ -296,8 +296,6 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Capture snapshot of hardware stats so netdev can calculate delta */
 	fbnic_reset_hw_stats(fbd);
 
-	fbnic_hwmon_register(fbd);
-
 	if (!fbd->dsn) {
 		dev_warn(&pdev->dev, "Reading serial number failed\n");
 		goto init_failure_mode;
@@ -360,7 +358,6 @@ static void fbnic_remove(struct pci_dev *pdev)
 		fbnic_netdev_free(fbd);
 	}
 
-	fbnic_hwmon_unregister(fbd);
 	fbnic_dbg_fbd_exit(fbd);
 	fbnic_devlink_unregister(fbd);
 	fbnic_fw_disable_mbx(fbd);
@@ -1827,7 +1827,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
 
 	for (i = 0; i < tp->int_nums; i++) {
 		irq = pci_irq_vector(pdev, i);
-		if (!irq) {
+		if (irq < 0) {
 			pci_disable_msix(pdev);
 			return irq;
 		}
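The rtase fix above replaces `if (!irq)` with `if (irq < 0)`: pci_irq_vector() reports failure as a negative errno, never zero, so the old test could not fire. A standalone sketch with a hypothetical stand-in for pci_irq_vector():

	#include <stdio.h>

	/* stand-in: returns an IRQ number, or a negative errno on failure */
	static int fake_irq_vector(int index)
	{
		return index < 2 ? 32 + index : -22;	/* -EINVAL past the end */
	}

	int main(void)
	{
		int irq = fake_irq_vector(5);

		if (!irq)	/* old check: never true, errors are negative */
			printf("old check caught it\n");
		if (irq < 0)	/* new check: catches the error */
			printf("new check caught it: %d\n", irq);
		return 0;
	}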
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/module.h>
@@ -19,6 +20,8 @@ struct tegra_mgbe {
 	struct reset_control *rst_mac;
 	struct reset_control *rst_pcs;
 
+	u32 iommu_sid;
+
 	void __iomem *hv;
 	void __iomem *regs;
 	void __iomem *xpcs;
@@ -50,7 +53,6 @@ struct tegra_mgbe {
 #define MGBE_WRAP_COMMON_INTR_ENABLE	0x8704
 #define MAC_SBD_INTR			BIT(2)
 #define MGBE_WRAP_AXI_ASID0_CTRL	0x8400
-#define MGBE_SID			0x6
 
 static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
 {
@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
 	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
 
 	/* Program SID */
-	writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+	writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
 
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
 	if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
 	if (IS_ERR(mgbe->xpcs))
 		return PTR_ERR(mgbe->xpcs);
 
+	/* get controller's stream id from iommu property in device tree */
+	if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
+		dev_err(mgbe->dev, "failed to get iommu stream id\n");
+		return -EINVAL;
+	}
+
 	res.addr = mgbe->regs;
 	res.irq = irq;
 
@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
 	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
 
 	/* Program SID */
-	writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+	writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
 
 	plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
@@ -334,27 +334,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
 	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
 				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
 
+	buf[0] = rd32(wx, WX_MNG_MBOX);
+	if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+		wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
+		status = -EINVAL;
+		goto rel_out;
+	}
+
 	/* Check command completion */
 	if (status) {
-		wx_dbg(wx, "Command has failed with no status valid.\n");
-
-		buf[0] = rd32(wx, WX_MNG_MBOX);
-		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
-			status = -EINVAL;
-			goto rel_out;
-		}
-		if ((buf[0] & 0xff0000) >> 16 == 0x80) {
-			wx_dbg(wx, "It's unknown cmd.\n");
-			status = -EINVAL;
-			goto rel_out;
-		}
-
+		wx_err(wx, "Command has failed with no status valid.\n");
 		wx_dbg(wx, "write value:\n");
 		for (i = 0; i < dword_len; i++)
 			wx_dbg(wx, "%x ", buffer[i]);
 		wx_dbg(wx, "read value:\n");
 		for (i = 0; i < dword_len; i++)
 			wx_dbg(wx, "%x ", buf[i]);
 		wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
-
 		goto rel_out;
 	}
 
 	if (!return_data)
@@ -3072,7 +3072,11 @@ static int ca8210_probe(struct spi_device *spi_device)
 	spi_set_drvdata(priv->spi, priv);
 	if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
 		cascoda_api_upstream = ca8210_test_int_driver_write;
-		ca8210_test_interface_init(priv);
+		ret = ca8210_test_interface_init(priv);
+		if (ret) {
+			dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n");
+			goto error;
+		}
 	} else {
 		cascoda_api_upstream = NULL;
 	}
@@ -125,6 +125,8 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
 
 	xfer.data.in = skb_put(skb, mi->mrl);
 
+	/* Make sure netif_rx() is read in the same order as i3c. */
+	mutex_lock(&mi->lock);
 	rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
 	if (rc < 0)
 		goto err;
@@ -166,8 +168,10 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
 		stats->rx_dropped++;
 	}
 
+	mutex_unlock(&mi->lock);
 	return 0;
 err:
+	mutex_unlock(&mi->lock);
 	kfree_skb(skb);
 	return rc;
 }
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog);
+	return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
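The TCP hunk above relaxes the listen-queue "full" test from >= to >. With a backlog of 0, the old test reported the queue full even when empty, so every incoming SYN was rejected; the commit message calls this out as "tcp: allow a connection when sk_max_ack_backlog is zero". A standalone C sketch of the off-by-one (illustrative only, not kernel code):

	#include <stdio.h>

	static int queue_is_full_old(int qlen, int max_backlog)
	{
		return qlen >= max_backlog;	/* 0 >= 0 -> "full" even when empty */
	}

	static int queue_is_full_new(int qlen, int max_backlog)
	{
		return qlen > max_backlog;	/* 0 > 0 -> not full, one SYN may queue */
	}

	int main(void)
	{
		printf("old: %d, new: %d\n",
		       queue_is_full_old(0, 0), queue_is_full_new(0, 0));
		return 0;	/* prints: old: 1, new: 0 */
	}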
@@ -55,11 +55,11 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto drop;
 
 	rcu_read_lock();
-	proto = find_snap_client(skb_transport_header(skb));
+	proto = find_snap_client(skb->data);
 	if (proto) {
 		/* Pass the frame on. */
-		skb->transport_header += 5;
 		skb_pull_rcsum(skb, 5);
+		skb_reset_transport_header(skb);
 		rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
 	}
 	rcu_read_unlock();
@@ -1031,9 +1031,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
 
 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
 {
-	/* If we're advertising or initiating an LE connection we can't
-	 * go ahead and change the random address at this time. This is
-	 * because the eventual initiator address used for the
+	/* If a random_addr has been set we're advertising or initiating an LE
+	 * connection we can't go ahead and change the random address at this
+	 * time. This is because the eventual initiator address used for the
 	 * subsequently created connection will be undefined (some
 	 * controllers use the new address and others the one we had
 	 * when the operation started).
@@ -1041,8 +1041,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
 	 * In this kind of scenario skip the update and let the random
 	 * address be updated at the next cycle.
 	 */
-	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
-	    hci_lookup_le_connect(hdev)) {
+	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
+	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+	     hci_lookup_le_connect(hdev))) {
 		bt_dev_dbg(hdev, "Deferring random address update");
 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 		return 0;
@@ -7655,6 +7655,24 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
 }
 
+static void add_device_complete(struct hci_dev *hdev, void *data, int err)
+{
+	struct mgmt_pending_cmd *cmd = data;
+	struct mgmt_cp_add_device *cp = cmd->param;
+
+	if (!err) {
+		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+			     cp->action);
+		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
+				     cp->addr.type, hdev->conn_flags,
+				     PTR_UINT(cmd->user_data));
+	}
+
+	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
+			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
+	mgmt_pending_free(cmd);
+}
+
 static int add_device_sync(struct hci_dev *hdev, void *data)
 {
 	return hci_update_passive_scan_sync(hdev);
@@ -7663,6 +7681,7 @@ static int add_device_sync(struct hci_dev *hdev, void *data)
 static int add_device(struct sock *sk, struct hci_dev *hdev,
 		      void *data, u16 len)
 {
+	struct mgmt_pending_cmd *cmd;
 	struct mgmt_cp_add_device *cp = data;
 	u8 auto_conn, addr_type;
 	struct hci_conn_params *params;
@@ -7743,9 +7762,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
 		current_flags = params->flags;
 	}
 
-	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
-	if (err < 0)
+	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
 		goto unlock;
+	}
+
+	cmd->user_data = UINT_PTR(current_flags);
+
+	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
+				 add_device_complete);
+	if (err < 0) {
+		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+					MGMT_STATUS_FAILED, &cp->addr,
+					sizeof(cp->addr));
+		mgmt_pending_free(cmd);
+	}
 
 	goto unlock;
 
 added:
 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
@@ -201,14 +201,14 @@ static ssize_t address_show(struct device *tty_dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
-	return sprintf(buf, "%pMR\n", &dev->dst);
+	return sysfs_emit(buf, "%pMR\n", &dev->dst);
 }
 
 static ssize_t channel_show(struct device *tty_dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
-	return sprintf(buf, "%d\n", dev->channel);
+	return sysfs_emit(buf, "%d\n", dev->channel);
 }
 
 static DEVICE_ATTR_RO(address);
@@ -753,6 +753,36 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
 }
 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
 
+/* must be called under rcu_read_lock(), as we dont take a reference */
+static struct napi_struct *napi_by_id(unsigned int napi_id)
+{
+	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
+	struct napi_struct *napi;
+
+	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
+		if (napi->napi_id == napi_id)
+			return napi;
+
+	return NULL;
+}
+
+/* must be called under rcu_read_lock(), as we dont take a reference */
+struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id)
+{
+	struct napi_struct *napi;
+
+	napi = napi_by_id(napi_id);
+	if (!napi)
+		return NULL;
+
+	if (WARN_ON_ONCE(!napi->dev))
+		return NULL;
+	if (!net_eq(net, dev_net(napi->dev)))
+		return NULL;
+
+	return napi;
+}
+
 /**
  *	__dev_get_by_name - find a device by its name
  *	@net: the applicable net namespace
@@ -6293,19 +6323,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 }
 EXPORT_SYMBOL(napi_complete_done);
 
-/* must be called under rcu_read_lock(), as we dont take a reference */
-struct napi_struct *napi_by_id(unsigned int napi_id)
-{
-	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
-	struct napi_struct *napi;
-
-	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
-		if (napi->napi_id == napi_id)
-			return napi;
-
-	return NULL;
-}
-
 static void skb_defer_free_flush(struct softnet_data *sd)
 {
 	struct sk_buff *skb, *next;
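The netdev_napi_by_id() wrapper above is the core of the "prevent accessing NAPI instances from another namespace" fix: the raw hash lookup is global, so a hit must be validated against the caller's netns before use. A standalone C sketch of that lookup-then-check-owner pattern (all names illustrative):

	#include <stdio.h>
	#include <stddef.h>

	struct ctx { int id; };
	struct item { unsigned int key; struct ctx *owner; };

	static struct ctx netns_a = { 1 }, netns_b = { 2 };
	static struct item table[] = { { 42, &netns_a }, { 43, &netns_b } };

	/* plain lookup: finds the item no matter who owns it */
	static struct item *find(unsigned int key)
	{
		for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (table[i].key == key)
				return &table[i];
		return NULL;
	}

	/* scoped lookup: reject hits owned by another context */
	static struct item *find_scoped(struct ctx *net, unsigned int key)
	{
		struct item *it = find(key);

		if (!it || it->owner != net)
			return NULL;
		return it;
	}

	int main(void)
	{
		/* key 43 belongs to netns_b, so the scoped lookup returns NULL */
		printf("%p %p\n", (void *)find(43),
		       (void *)find_scoped(&netns_a, 43));
		return 0;
	}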
@@ -22,6 +22,8 @@ struct sd_flow_limit {
 
 extern int netdev_flow_limit_table_len;
 
+struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id);
+
 #ifdef CONFIG_PROC_FS
 int __init dev_proc_init(void);
 #else
@@ -269,7 +271,6 @@ void xdp_do_check_flushed(struct napi_struct *napi);
 static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
 #endif
 
-struct napi_struct *napi_by_id(unsigned int napi_id);
 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
 
 #define XMIT_RECURSION_LIMIT	8
@@ -42,14 +42,18 @@ static unsigned int default_operstate(const struct net_device *dev)
 	 * first check whether lower is indeed the source of its down state.
 	 */
 	if (!netif_carrier_ok(dev)) {
-		int iflink = dev_get_iflink(dev);
 		struct net_device *peer;
+		int iflink;
 
 		/* If called from netdev_run_todo()/linkwatch_sync_dev(),
 		 * dev_net(dev) can be already freed, and RTNL is not held.
 		 */
-		if (dev->reg_state == NETREG_UNREGISTERED ||
-		    iflink == dev->ifindex)
+		if (dev->reg_state <= NETREG_REGISTERED)
+			iflink = dev_get_iflink(dev);
+		else
+			iflink = dev->ifindex;
+
+		if (iflink == dev->ifindex)
 			return IF_OPER_DOWN;
 
 		ASSERT_RTNL();
@@ -167,8 +167,6 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
 	void *hdr;
 	pid_t pid;
 
-	if (WARN_ON_ONCE(!napi->dev))
-		return -EINVAL;
 	if (!(napi->dev->flags & IFF_UP))
 		return 0;
 
@@ -176,8 +174,7 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
 	if (!hdr)
 		return -EMSGSIZE;
 
-	if (napi->napi_id >= MIN_NAPI_ID &&
-	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
+	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
 		goto nla_put_failure;
 
 	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
@@ -235,7 +232,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
 	rtnl_lock();
 	rcu_read_lock();
 
-	napi = napi_by_id(napi_id);
+	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
 	if (napi) {
 		err = netdev_nl_napi_fill_one(rsp, napi, info);
 	} else {
@@ -272,6 +269,8 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
 		return err;
 
 	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
+		if (napi->napi_id < MIN_NAPI_ID)
+			continue;
 		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
 			continue;
 
@@ -354,7 +353,7 @@ int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
 	rtnl_lock();
 	rcu_read_lock();
 
-	napi = napi_by_id(napi_id);
+	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
 	if (napi) {
 		err = netdev_nl_napi_set_config(napi, info);
 	} else {
@@ -896,7 +896,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
 	sock_net_set(ctl_sk, net);
 	if (sk) {
 		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
-				   inet_twsk(sk)->tw_mark : sk->sk_mark;
+				   inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
 		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
 				   inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
 		transmit_time = tcp_transmit_time(sk);
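The hunk above wraps the sk_mark read in READ_ONCE() because the field can be written concurrently by the socket owner while the reset path reads it locklessly, matching the existing READ_ONCE(sk->sk_priority) on the next line. A sketch of the volatile-cast essence of READ_ONCE() for scalar fields (a simplification; the real kernel macro does more):

/* simplified: force one untorn load, forbid compiler refetch/fusion */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct sock_like {
	unsigned int mark;	/* may be written concurrently elsewhere */
};

unsigned int read_mark(struct sock_like *sk)
{
	return READ_ONCE(sk->mark);
}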
@@ -684,6 +684,10 @@ void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
 	ASSERT_RTNL();
 
 	mutex_lock(&sdata->local->iflist_mtx);
+	if (list_empty(&sdata->local->interfaces)) {
+		mutex_unlock(&sdata->local->iflist_mtx);
+		return;
+	}
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
@@ -102,16 +102,15 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
 }
 
 #ifdef CONFIG_SYSCTL
-static int mptcp_set_scheduler(const struct net *net, const char *name)
+static int mptcp_set_scheduler(char *scheduler, const char *name)
 {
-	struct mptcp_pernet *pernet = mptcp_get_pernet(net);
 	struct mptcp_sched_ops *sched;
 	int ret = 0;
 
 	rcu_read_lock();
 	sched = mptcp_sched_find(name);
 	if (sched)
-		strscpy(pernet->scheduler, name, MPTCP_SCHED_NAME_MAX);
+		strscpy(scheduler, name, MPTCP_SCHED_NAME_MAX);
 	else
 		ret = -ENOENT;
 	rcu_read_unlock();
@@ -122,7 +121,7 @@ static int mptcp_set_scheduler(const struct net *net, const char *name)
 static int proc_scheduler(const struct ctl_table *ctl, int write,
 			  void *buffer, size_t *lenp, loff_t *ppos)
 {
-	const struct net *net = current->nsproxy->net_ns;
+	char (*scheduler)[MPTCP_SCHED_NAME_MAX] = ctl->data;
 	char val[MPTCP_SCHED_NAME_MAX];
 	struct ctl_table tbl = {
 		.data = val,
@@ -130,11 +129,11 @@ static int proc_scheduler(const struct ctl_table *ctl, int write,
 	};
 	int ret;
 
-	strscpy(val, mptcp_get_scheduler(net), MPTCP_SCHED_NAME_MAX);
+	strscpy(val, *scheduler, MPTCP_SCHED_NAME_MAX);
 
 	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 	if (write && ret == 0)
-		ret = mptcp_set_scheduler(net, val);
+		ret = mptcp_set_scheduler(*scheduler, val);
 
 	return ret;
 }
@@ -161,7 +160,9 @@ static int proc_blackhole_detect_timeout(const struct ctl_table *table,
 					 int write, void *buffer, size_t *lenp,
 					 loff_t *ppos)
 {
-	struct mptcp_pernet *pernet = mptcp_get_pernet(current->nsproxy->net_ns);
+	struct mptcp_pernet *pernet = container_of(table->data,
+						   struct mptcp_pernet,
+						   blackhole_timeout);
 	int ret;
 
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -228,7 +229,7 @@ static struct ctl_table mptcp_sysctl_table[] = {
 	{
 		.procname = "available_schedulers",
 		.maxlen = MPTCP_SCHED_BUF_MAX,
-		.mode = 0644,
+		.mode = 0444,
 		.proc_handler = proc_available_schedulers,
 	},
 	{
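Several conversions in this series (mptcp above, rds and sctp below) share one pattern: instead of resolving the netns through current->nsproxy, which can be a NULL dereference if the task is exiting and follows the writer's namespace rather than the opener's, the sysctl handler recovers its per-net struct from the ctl->data member pointer with container_of(). A standalone sketch of that pointer arithmetic, using a simplified container_of() and illustrative struct and field names:

#include <stddef.h>
#include <stdio.h>

/* simplified container_of(): step back from a member to its struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pernet {
	int blackhole_timeout;
	char scheduler[16];
};

static void handler(void *data)
{
	/* data points at the member; recover the enclosing per-net struct */
	struct pernet *pn = container_of(data, struct pernet,
					 blackhole_timeout);

	printf("timeout=%d sched=%s\n", pn->blackhole_timeout,
	       pn->scheduler);
}

int main(void)
{
	struct pernet pn = { .blackhole_timeout = 3600,
			     .scheduler = "default" };

	handler(&pn.blackhole_timeout);
	return 0;
}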
@@ -2517,12 +2517,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 	struct hlist_nulls_head *hash;
 	unsigned int nr_slots, i;
 
-	if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+	if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head)))
 		return NULL;
 
 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
 
+	if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head)))
+		return NULL;
+
 	hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
 
 	if (hash && nulls)
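The conntrack hunk above adds a second bound check because roundup() can push a size that passed the first check back over the limit. A small standalone sketch of the check, round, recheck pattern, with made-up sizes and function names:

#include <limits.h>
#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

static unsigned int checked_slots(unsigned int requested,
				  unsigned int slot_size,
				  unsigned int align)
{
	if (requested > INT_MAX / slot_size)	/* pre-roundup check */
		return 0;

	requested = roundup(requested, align);

	if (requested > INT_MAX / slot_size)	/* post-roundup recheck */
		return 0;

	return requested;
}

int main(void)
{
	/* a request just under the limit passes check one but not two */
	unsigned int near = INT_MAX / 8;

	printf("%u -> %u\n", near, checked_slots(near, 8, 4096));
	return 0;
}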
@@ -8822,6 +8822,7 @@ static void nft_unregister_flowtable_hook(struct net *net,
 }
 
 static void __nft_unregister_flowtable_net_hooks(struct net *net,
+						 struct nft_flowtable *flowtable,
 						 struct list_head *hook_list,
 						 bool release_netdev)
 {
@@ -8829,6 +8830,8 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
 
 	list_for_each_entry_safe(hook, next, hook_list, list) {
 		nf_unregister_net_hook(net, &hook->ops);
+		flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+					    FLOW_BLOCK_UNBIND);
 		if (release_netdev) {
 			list_del(&hook->list);
 			kfree_rcu(hook, rcu);
@@ -8837,9 +8840,10 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
 }
 
 static void nft_unregister_flowtable_net_hooks(struct net *net,
+						struct nft_flowtable *flowtable,
 						struct list_head *hook_list)
 {
-	__nft_unregister_flowtable_net_hooks(net, hook_list, false);
+	__nft_unregister_flowtable_net_hooks(net, flowtable, hook_list, false);
 }
 
 static int nft_register_flowtable_net_hooks(struct net *net,
@@ -9481,8 +9485,6 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
 
 	flowtable->data.type->free(&flowtable->data);
 	list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
-		flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
-					    FLOW_BLOCK_UNBIND);
 		list_del_rcu(&hook->list);
 		kfree_rcu(hook, rcu);
 	}
@@ -10870,6 +10872,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 							   &nft_trans_flowtable_hooks(trans),
 							   trans->msg_type);
 				nft_unregister_flowtable_net_hooks(net,
+								   nft_trans_flowtable(trans),
 								   &nft_trans_flowtable_hooks(trans));
 			} else {
 				list_del_rcu(&nft_trans_flowtable(trans)->list);
@@ -10878,6 +10881,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 							   NULL,
 							   trans->msg_type);
 				nft_unregister_flowtable_net_hooks(net,
+								   nft_trans_flowtable(trans),
 								   &nft_trans_flowtable(trans)->hook_list);
 			}
 			break;
@@ -11140,11 +11144,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 		case NFT_MSG_NEWFLOWTABLE:
 			if (nft_trans_flowtable_update(trans)) {
 				nft_unregister_flowtable_net_hooks(net,
+								   nft_trans_flowtable(trans),
 								   &nft_trans_flowtable_hooks(trans));
 			} else {
 				nft_use_dec_restore(&table->use);
 				list_del_rcu(&nft_trans_flowtable(trans)->list);
 				nft_unregister_flowtable_net_hooks(net,
+								   nft_trans_flowtable(trans),
 								   &nft_trans_flowtable(trans)->hook_list);
 			}
 			break;
@@ -11737,7 +11743,8 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
 	list_for_each_entry(chain, &table->chains, list)
 		__nf_tables_unregister_hook(net, table, chain, true);
 	list_for_each_entry(flowtable, &table->flowtables, list)
-		__nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
+		__nft_unregister_flowtable_net_hooks(net, flowtable,
+						     &flowtable->hook_list,
 						     true);
 }
@@ -61,8 +61,10 @@ static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
 
 static struct kmem_cache *rds_tcp_conn_slab;
 
-static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
-				 void *buffer, size_t *lenp, loff_t *fpos);
+static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
+				  void *buffer, size_t *lenp, loff_t *fpos);
+static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
+				  void *buffer, size_t *lenp, loff_t *fpos);
 
 static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
 static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
@@ -74,7 +76,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
 		/* data is per-net pointer */
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = rds_tcp_skbuf_handler,
+		.proc_handler = rds_tcp_sndbuf_handler,
 		.extra1 = &rds_tcp_min_sndbuf,
 	},
 #define	RDS_TCP_RCVBUF	1
@@ -83,7 +85,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
 		/* data is per-net pointer */
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = rds_tcp_skbuf_handler,
+		.proc_handler = rds_tcp_rcvbuf_handler,
 		.extra1 = &rds_tcp_min_rcvbuf,
 	},
 };
@@ -682,10 +684,10 @@ static void rds_tcp_sysctl_reset(struct net *net)
 	spin_unlock_irq(&rds_tcp_conn_lock);
 }
 
-static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
+static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
+				 const struct ctl_table *ctl, int write,
 				 void *buffer, size_t *lenp, loff_t *fpos)
 {
-	struct net *net = current->nsproxy->net_ns;
 	int err;
 
 	err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
@@ -694,11 +696,34 @@ static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
 			*(int *)(ctl->extra1));
 		return err;
 	}
-	if (write)
+
+	if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
+		struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);
+
 		rds_tcp_sysctl_reset(net);
+	}
+
 	return 0;
 }
 
+static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
+				  void *buffer, size_t *lenp, loff_t *fpos)
+{
+	struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
+					       sndbuf_size);
+
+	return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
+}
+
+static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
+				  void *buffer, size_t *lenp, loff_t *fpos)
+{
+	struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
+					       rcvbuf_size);
+
+	return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
+}
+
 static void rds_tcp_exit(void)
 {
 	rds_tcp_set_unloading();
@@ -356,7 +356,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
 	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
 	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
-	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
+	[TCA_FLOW_RSHIFT]	= NLA_POLICY_MAX(NLA_U32,
+						 31 /* BITS_PER_U32 - 1 */),
 	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
 	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
 	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
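The policy change above caps TCA_FLOW_RSHIFT at 31 because using a larger attribute value as a shift count on a 32-bit word is undefined behaviour in C. A tiny standalone sketch of the guard the policy now provides (illustrative function, not the classifier code):

#include <stdint.h>
#include <stdio.h>

static int apply_rshift(uint32_t key, uint32_t rshift, uint32_t *out)
{
	if (rshift > 31)	/* reject what NLA_POLICY_MAX now rejects */
		return -1;

	*out = key >> rshift;
	return 0;
}

int main(void)
{
	uint32_t v;

	if (apply_rshift(0xffff0000u, 16, &v) == 0)
		printf("ok: 0x%x\n", v);		/* ok: 0xffff */
	if (apply_rshift(0xffff0000u, 255, &v) != 0)
		printf("rejected: rshift 255\n");	/* UB avoided */
	return 0;
}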
@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count))
+		q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+				 struct cake_flow *flow,
+				 int flow_mode)
+{
+	u16 host_load = 1;
+
+	if (cake_dsrc(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+	if (cake_ddst(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
+	 */
+	return (q->flow_quantum * quantum_div[host_load] +
+		get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
@@ -773,10 +830,8 @@ skip_hash:
 		allocate_dst = cake_ddst(flow_mode);
 
 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-			if (allocate_src)
-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-			if (allocate_dst)
-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
 		}
 found:
 		/* reserve queue for future packets in same flow */
@@ -801,9 +856,10 @@ found:
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 		srchost_idx = outer_hash + k;
-		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-			q->hosts[srchost_idx].srchost_bulk_flow_count++;
 		q->flows[reduced_hash].srchost = srchost_idx;
+
+		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+			cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 	}
 
 	if (allocate_dst) {
@@ -824,9 +880,10 @@ found_src:
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
 		dsthost_idx = outer_hash + k;
-		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-			q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 		q->flows[reduced_hash].dsthost = dsthost_idx;
+
+		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+			cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 	}
 }
 
@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-		u16 host_load = 1;
-
 		if (!flow->set) {
 			list_add_tail(&flow->flowchain, &b->new_flows);
 		} else {
@@ -1852,18 +1905,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_SPARSE;
 		b->sparse_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		flow->deficit = (b->flow_quantum *
-				 quantum_div[host_load]) >> 16;
+		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
@@ -1871,12 +1914,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count++;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count++;
-
+		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct cake_tin_data *b = &q->tins[q->cur_tin];
-	struct cake_host *srchost, *dsthost;
 	ktime_t now = ktime_get();
 	struct cake_flow *flow;
 	struct list_head *head;
 	bool first_flow = true;
 	struct sk_buff *skb;
-	u16 host_load;
 	u64 delay;
 	u32 len;
 
@@ -2039,11 +2076,6 @@ retry:
 	q->cur_flow = flow - b->flows;
 	first_flow = false;
 
-	/* triple isolation (modified DRR++) */
-	srchost = &b->hosts[flow->srchost];
-	dsthost = &b->hosts[flow->dsthost];
-	host_load = 1;
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
 		/* Keep all flows with deficits out of the sparse and decaying
@@ -2055,11 +2087,8 @@ retry:
 			b->sparse_flow_count--;
 			b->bulk_flow_count++;
 
-			if (cake_dsrc(q->flow_mode))
-				srchost->srchost_bulk_flow_count++;
-
-			if (cake_ddst(q->flow_mode))
-				dsthost->dsthost_bulk_flow_count++;
+			cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+			cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 			flow->set = CAKE_SET_BULK;
 		} else {
@@ -2071,19 +2100,7 @@ retry:
 		}
 	}
 
-	if (cake_dsrc(q->flow_mode))
-		host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-	if (cake_ddst(q->flow_mode))
-		host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-	WARN_ON(host_load > CAKE_QUEUES);
-
-	/* The get_random_u16() is a way to apply dithering to avoid
-	 * accumulating roundoff errors
-	 */
-	flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-			  get_random_u16()) >> 16;
+	flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
 	list_move_tail(&flow->flowchain, &b->old_flows);
 
 	goto retry;
@@ -2107,11 +2124,8 @@ retry:
 	if (flow->set == CAKE_SET_BULK) {
 		b->bulk_flow_count--;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count--;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count--;
+		cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 		b->decaying_flow_count++;
 	} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2129,12 +2143,8 @@ retry:
 	else if (flow->set == CAKE_SET_BULK) {
 		b->bulk_flow_count--;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count--;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count--;
-
+		cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	} else
 		b->decaying_flow_count--;
 
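The sch_cake refactor above funnels all host-load accounting through bounds-checked helpers and a single quantum computation. The sketch below reproduces the dithered fixed-point division that cake_get_flow_quantum() performs: quantum_div[] holds 16-bit reciprocals (65535 / host_load) so a multiply-and-shift replaces a divide, and adding a random 16-bit value before the >> 16 dithers away the truncation bias. Standalone userspace code with illustrative values; rand() stands in for get_random_u16().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_HOSTS 1024

static uint16_t quantum_div[MAX_HOSTS + 1];

static void init_quantum_div(void)
{
	for (int i = 1; i <= MAX_HOSTS; i++)
		quantum_div[i] = 65535 / i;
}

static uint16_t flow_quantum(uint16_t quantum, uint16_t host_load)
{
	/* quantum / host_load in fixed point; the random addend makes
	 * the truncation error average out over many packets */
	return ((uint32_t)quantum * quantum_div[host_load] +
		(rand() & 0xffff)) >> 16;
}

int main(void)
{
	long sum = 0;

	init_quantum_div();
	for (int i = 0; i < 100000; i++)
		sum += flow_quantum(1514, 3);
	/* mean approaches 1514 / 3 (about 504.7) instead of always
	 * flooring to 504 */
	printf("mean quantum: %.2f\n", sum / 100000.0);
	return 0;
}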
@@ -387,7 +387,8 @@ static struct ctl_table sctp_net_table[] = {
 static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
 				 void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct net *net = container_of(ctl->data, struct net,
+				       sctp.sctp_hmac_alg);
 	struct ctl_table tbl;
 	bool changed = false;
 	char *none = "none";
@@ -432,7 +433,7 @@ static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
 static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
 				void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct net *net = container_of(ctl->data, struct net, sctp.rto_min);
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
 	struct ctl_table tbl;
@@ -460,7 +461,7 @@ static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
 static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write,
 				void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct net *net = container_of(ctl->data, struct net, sctp.rto_max);
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
 	struct ctl_table tbl;
@@ -498,7 +499,7 @@ static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
 static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
 			     void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct net *net = container_of(ctl->data, struct net, sctp.auth_enable);
 	struct ctl_table tbl;
 	int new_value, ret;
 
@@ -527,7 +528,7 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
 static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
 				 void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct net *net = container_of(ctl->data, struct net, sctp.udp_port);
 	unsigned int min = *(unsigned int *)ctl->extra1;
 	unsigned int max = *(unsigned int *)ctl->extra2;
 	struct ctl_table tbl;
@@ -568,7 +569,8 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
 static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
 				       void *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct net *net = container_of(ctl->data, struct net,
+				       sctp.probe_interval);
 	struct ctl_table tbl;
 	int ret, new_value;
 
@@ -458,7 +458,7 @@ int tls_tx_records(struct sock *sk, int flags)
 
 tx_err:
 	if (rc < 0 && rc != -EAGAIN)
-		tls_err_abort(sk, -EBADMSG);
+		tls_err_abort(sk, rc);
 
 	return rc;
 }
@@ -78,10 +78,10 @@
         "setup": [
             "$TC qdisc add dev $DEV1 ingress"
         ],
-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0xff",
+        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0x1f",
         "expExitCode": "0",
         "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
-        "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 255 baseclass",
+        "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 31 baseclass",
         "matchCount": "1",
         "teardown": [
             "$TC qdisc del dev $DEV1 ingress"