Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"A new Pensando ionic driver, a new Gen 3 HW support for Intel irdma,
and lots of small bnxt_re improvements.
- Small bug fixes and improves to hfi1, efa, mlx5, erdma, rdmarvt,
siw
- Allow userspace access to IB service records through the rdmacm
- Optimize dma mapping for erdma
- Fix shutdown of the GSI QP in mana
- Support relaxed ordering MR and fix a corruption bug with mlx5 DMA
Data Direct
- Many improvements to bnxt_re:
- Debugging features and counters
- Improve performance of some commands
- Change flow_label reporting in completions
- Mirror vnic
- RDMA flow support
- New RDMA driver for Pensando Ethernet devices: ionic
- Gen 3 hardware support for the Intel irdma driver
- Fix rdma routing resolution with VRFs"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (85 commits)
RDMA/ionic: Fix memory leak of admin q_wr
RDMA/siw: Always report immediate post SQ errors
RDMA/bnxt_re: improve clarity in ALLOC_PAGE handler
RDMA/irdma: Remove unused struct irdma_cq fields
RDMA/irdma: Fix positive vs negative error codes in irdma_post_send()
RDMA/bnxt_re: Remove non-statistics counters from hw_counters
RDMA/bnxt_re: Add debugfs info entry for device and resource information
RDMA/bnxt_re: Fix incorrect errno used in function comments
RDMA: Use %pe format specifier for error pointers
RDMA/ionic: Use ether_addr_copy instead of memcpy
RDMA/ionic: Fix build failure on SPARC due to xchg() operand size
RDMA/rxe: Fix race in do_task() when draining
IB/sa: Fix sa_local_svc_timeout_ms read race
IB/ipoib: Ignore L3 master device
RDMA/core: Use route entry flag to decide on loopback traffic
RDMA/core: Resolve MAC of next-hop device without ARP support
RDMA/core: Squash a single user static function
RDMA/irdma: Update Kconfig
RDMA/irdma: Extend CQE Error and Flush Handling for GEN3 Devices
RDMA/irdma: Add Atomic Operations support
...
@@ -50,6 +50,7 @@ Contents:

   neterion/s2io
   netronome/nfp
   pensando/ionic
+  pensando/ionic_rdma
   qualcomm/ppe/ppe
   smsc/smc9
   stmicro/stmmac
@@ -13,6 +13,7 @@ Contents
 - Identifying the Adapter
 - Enabling the driver
 - Configuring the driver
+- RDMA Support via Auxiliary Device
 - Statistics
 - Support
@@ -105,6 +106,15 @@ XDP
 Support for XDP includes the basics, plus Jumbo frames, Redirect and
 ndo_xmit. There is no current support for zero-copy sockets or HW offload.

+RDMA Support via Auxiliary Device
+=================================
+
+The ionic driver supports RDMA (Remote Direct Memory Access) functionality
+through the Linux auxiliary device framework when advertised by the firmware.
+RDMA capability is detected during device initialization, and if supported,
+the ethernet driver will create an auxiliary device that allows the RDMA
+driver to bind and provide InfiniBand/RoCE functionality.
+
 Statistics
 ==========
@@ -0,0 +1,52 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========================================================
+RDMA Driver for the AMD Pensando(R) Ethernet adapter family
+===========================================================
+
+AMD Pensando RDMA driver.
+Copyright (C) 2018-2025, Advanced Micro Devices, Inc.
+
+Overview
+========
+
+The ionic_rdma driver provides Remote Direct Memory Access functionality
+for AMD Pensando DSC (Distributed Services Card) devices. This driver
+implements RDMA capabilities as an auxiliary driver that operates in
+conjunction with the ionic ethernet driver.
+
+The ionic ethernet driver detects RDMA capability during device
+initialization and creates auxiliary devices that the ionic_rdma driver
+binds to, establishing the RDMA data path and control interfaces.
+
+Identifying the Adapter
+=======================
+
+See Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
+for more information on identifying the adapter.
+
+Enabling the driver
+===================
+
+The ionic_rdma driver depends on the ionic ethernet driver.
+See Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
+for detailed information on enabling and configuring the ionic driver.
+
+The ionic_rdma driver is enabled via the standard kernel configuration system,
+using the make command::
+
+  make oldconfig/menuconfig/etc.
+
+The driver is located in the menu structure at:
+
+  -> Device Drivers
+    -> InfiniBand support
+      -> AMD Pensando DSC RDMA/RoCE Support
+
+Support
+=======
+
+For general Linux RDMA support, please use the RDMA mailing
+list, which is monitored by AMD Pensando personnel::
+
+  linux-rdma@vger.kernel.org
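To make the auxiliary-device pairing described above concrete, here is a
minimal sketch of how an RDMA-side module binds to an auxiliary device
published by an ethernet driver. The "ionic.rdma" match string and the
probe body are illustrative assumptions, not the actual ionic_rdma code::

  /* Sketch: auxiliary driver skeleton (assumed device name "ionic.rdma") */
  #include <linux/auxiliary_bus.h>
  #include <linux/module.h>

  static int example_rdma_probe(struct auxiliary_device *adev,
                                const struct auxiliary_device_id *id)
  {
          /* Allocate and register the ib_device against adev here. */
          dev_info(&adev->dev, "bound to %s\n", id->name);
          return 0;
  }

  static void example_rdma_remove(struct auxiliary_device *adev)
  {
          /* Unregister and free the ib_device created in probe. */
  }

  static const struct auxiliary_device_id example_rdma_id_table[] = {
          { .name = "ionic.rdma" },       /* assumed match string */
          {}
  };
  MODULE_DEVICE_TABLE(auxiliary, example_rdma_id_table);

  static struct auxiliary_driver example_rdma_driver = {
          .probe = example_rdma_probe,
          .remove = example_rdma_remove,
          .id_table = example_rdma_id_table,
  };
  module_auxiliary_driver(example_rdma_driver);

The auxiliary bus matches on "<module_name>.<device_name>" strings, so the
ethernet side only has to register an auxiliary device when firmware
advertises RDMA; no PCI-level coordination between the two drivers is
needed.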
@@ -1177,6 +1177,15 @@ F: Documentation/networking/device_drivers/ethernet/amd/pds_core.rst
 F:	drivers/net/ethernet/amd/pds_core/
 F:	include/linux/pds/

+AMD PENSANDO RDMA DRIVER
+M:	Abhijit Gangurde <abhijit.gangurde@amd.com>
+M:	Allen Hubbe <allen.hubbe@amd.com>
+L:	linux-rdma@vger.kernel.org
+S:	Maintained
+F:	Documentation/networking/device_drivers/ethernet/pensando/ionic_rdma.rst
+F:	drivers/infiniband/hw/ionic/
+F:	include/uapi/rdma/ionic-abi.h
+
 AMD PMC DRIVER
 M:	Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 L:	platform-driver-x86@vger.kernel.org
@@ -85,6 +85,7 @@ source "drivers/infiniband/hw/efa/Kconfig"
 source "drivers/infiniband/hw/erdma/Kconfig"
 source "drivers/infiniband/hw/hfi1/Kconfig"
 source "drivers/infiniband/hw/hns/Kconfig"
+source "drivers/infiniband/hw/ionic/Kconfig"
 source "drivers/infiniband/hw/irdma/Kconfig"
 source "drivers/infiniband/hw/mana/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
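As a quick cross-reference to the "Enabling the driver" section of the new
documentation, a modular build could be selected with a .config fragment
along these lines. CONFIG_INFINIBAND_IONIC is taken from the new Kconfig
and Makefile hooks in this merge; treating the ionic ethernet options as
prerequisites is an assumption::

  # Illustrative .config fragment for a modular ionic RDMA build
  CONFIG_NET_VENDOR_PENSANDO=y
  CONFIG_IONIC=m
  CONFIG_INFINIBAND=m
  CONFIG_INFINIBAND_IONIC=m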
@@ -446,36 +446,56 @@ static int addr6_resolve(struct sockaddr *src_sock,
 }
 #endif

+static bool is_dst_local(const struct dst_entry *dst)
+{
+	if (dst->ops->family == AF_INET)
+		return !!(dst_rtable(dst)->rt_type & RTN_LOCAL);
+	else if (dst->ops->family == AF_INET6)
+		return !!(dst_rt6_info(dst)->rt6i_flags & RTF_LOCAL);
+	else
+		return false;
+}
+
 static int addr_resolve_neigh(const struct dst_entry *dst,
 			      const struct sockaddr *dst_in,
 			      struct rdma_dev_addr *addr,
-			      unsigned int ndev_flags,
 			      u32 seq)
 {
-	int ret = 0;
-
-	if (ndev_flags & IFF_LOOPBACK) {
+	if (is_dst_local(dst)) {
 		/* When the destination is local entry, source and destination
 		 * are same. Skip the neighbour lookup.
 		 */
 		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
-	} else {
-		if (!(ndev_flags & IFF_NOARP)) {
-			/* If the device doesn't do ARP internally */
-			ret = fetch_ha(dst, addr, dst_in, seq);
-		}
+		return 0;
 	}
-	return ret;
+
+	return fetch_ha(dst, addr, dst_in, seq);
 }

-static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
-			    const struct sockaddr *dst_in,
-			    const struct dst_entry *dst,
-			    const struct net_device *ndev)
+static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
+				 const struct sockaddr *dst_in,
+				 const struct dst_entry *dst)
 {
-	int ret = 0;
+	struct net_device *ndev = READ_ONCE(dst->dev);

-	if (dst->dev->flags & IFF_LOOPBACK)
+	/* A physical device must be the RDMA device to use */
+	if (is_dst_local(dst)) {
+		int ret;
+		/*
+		 * RDMA (IB/RoCE, iWarp) doesn't run on lo interface or
+		 * loopback IP address. So if route is resolved to loopback
+		 * interface, translate that to a real ndev based on non
+		 * loopback IP address.
+		 */
+		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
+		if (IS_ERR(ndev))
+			return -ENODEV;
 		ret = rdma_translate_ip(dst_in, dev_addr);
-	else
+		if (ret)
+			return ret;
+	} else {
 		rdma_copy_src_l2_addr(dev_addr, dst->dev);
+	}

 	/*
 	 * If there's a gateway and type of device not ARPHRD_INFINIBAND,
@@ -490,31 +510,7 @@ static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
 	else
 		dev_addr->network = RDMA_NETWORK_IB;

-	return ret;
-}
-
-static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
-				 unsigned int *ndev_flags,
-				 const struct sockaddr *dst_in,
-				 const struct dst_entry *dst)
-{
-	struct net_device *ndev = READ_ONCE(dst->dev);
-
-	*ndev_flags = ndev->flags;
-	/* A physical device must be the RDMA device to use */
-	if (ndev->flags & IFF_LOOPBACK) {
-		/*
-		 * RDMA (IB/RoCE, iWarp) doesn't run on lo interface or
-		 * loopback IP address. So if route is resolved to loopback
-		 * interface, translate that to a real ndev based on non
-		 * loopback IP address.
-		 */
-		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
-		if (IS_ERR(ndev))
-			return -ENODEV;
-	}
-
-	return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+	return 0;
 }

 static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
@@ -551,7 +547,6 @@ static int addr_resolve(struct sockaddr *src_in,
 			u32 seq)
 {
 	struct dst_entry *dst = NULL;
-	unsigned int ndev_flags = 0;
 	struct rtable *rt = NULL;
 	int ret;

@@ -588,7 +583,7 @@ static int addr_resolve(struct sockaddr *src_in,
 		rcu_read_unlock();
 		goto done;
 	}
-	ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+	ret = rdma_set_src_addr_rcu(addr, dst_in, dst);
 	rcu_read_unlock();

	/*
@@ -596,7 +591,7 @@ static int addr_resolve(struct sockaddr *src_in,
 	 * only if src addr translation didn't fail.
 	 */
 	if (!ret && resolve_neigh)
-		ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+		ret = addr_resolve_neigh(dst, dst_in, addr, seq);

 	if (src_in->sa_family == AF_INET)
 		ip_rt_put(rt);
@@ -110,8 +110,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
 	agent = port_priv->agent[qpn];
 	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
 	if (IS_ERR(ah)) {
-		dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
-			PTR_ERR(ah));
+		dev_err(&device->dev, "ib_create_ah_from_wc error %pe\n", ah);
 		return;
 	}
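The %pe conversion adopted here is the generic printk extension for error
pointers: it prints the symbolic errno name (for example "-ENOMEM") and
removes the need to funnel the pointer through PTR_ERR() for a %ld. A
minimal sketch of the pattern::

  struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);

  if (IS_ERR(ah)) {
          /* Logs e.g. "create AH failed: -ENOMEM" */
          pr_err("create AH failed: %pe\n", ah);
          return;
  }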
@@ -1049,8 +1049,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
 	struct cm_id_private *cm_id_priv;

 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
-	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+	pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+			   cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
 }

 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
@@ -2076,6 +2076,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
 	kfree(id_priv->id.route.path_rec);
 	kfree(id_priv->id.route.path_rec_inbound);
 	kfree(id_priv->id.route.path_rec_outbound);
+	kfree(id_priv->id.route.service_recs);

 	put_net(id_priv->id.route.addr.dev_addr.net);
 	kfree(id_priv);
@@ -3382,13 +3383,18 @@ err1:
 int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
 {
 	struct rdma_id_private *id_priv;
+	enum rdma_cm_state state;
 	int ret;

 	if (!timeout_ms)
 		return -EINVAL;

 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
+	state = id_priv->state;
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_QUERY) &&
+	    !cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_RESOLVED,
+			   RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;

 	cma_id_get(id_priv);
@@ -3409,7 +3415,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)

 	return 0;
 err:
-	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, state);
 	cma_id_put(id_priv);
 	return ret;
 }
@@ -5506,3 +5512,129 @@ static void __exit cma_cleanup(void)

 module_init(cma_init);
 module_exit(cma_cleanup);
+
+static void cma_query_ib_service_handler(int status,
+					 struct sa_service_rec *recs,
+					 unsigned int num_recs, void *context)
+{
+	struct cma_work *work = context;
+	struct rdma_id_private *id_priv = work->id;
+	struct sockaddr_ib *addr;
+
+	if (status)
+		goto fail;
+
+	if (!num_recs) {
+		status = -ENOENT;
+		goto fail;
+	}
+
+	if (id_priv->id.route.service_recs) {
+		status = -EALREADY;
+		goto fail;
+	}
+
+	id_priv->id.route.service_recs =
+		kmalloc_array(num_recs, sizeof(*recs), GFP_KERNEL);
+	if (!id_priv->id.route.service_recs) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
+	id_priv->id.route.num_service_recs = num_recs;
+	memcpy(id_priv->id.route.service_recs, recs, sizeof(*recs) * num_recs);
+
+	addr = (struct sockaddr_ib *)&id_priv->id.route.addr.dst_addr;
+	addr->sib_family = AF_IB;
+	addr->sib_addr = *(struct ib_addr *)&recs->gid;
+	addr->sib_pkey = recs->pkey;
+	addr->sib_sid = recs->id;
+	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
+			   (union ib_gid *)&addr->sib_addr);
+	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr,
+			 ntohs(addr->sib_pkey));
+
+	queue_work(cma_wq, &work->work);
+	return;
+
+fail:
+	work->old_state = RDMA_CM_ADDRINFO_QUERY;
+	work->new_state = RDMA_CM_ADDR_BOUND;
+	work->event.event = RDMA_CM_EVENT_ADDRINFO_ERROR;
+	work->event.status = status;
+	pr_debug_ratelimited(
+		"RDMA CM: SERVICE_ERROR: failed to query service record. status %d\n",
+		status);
+	queue_work(cma_wq, &work->work);
+}
+
+static int cma_resolve_ib_service(struct rdma_id_private *id_priv,
+				  struct rdma_ucm_ib_service *ibs)
+{
+	struct sa_service_rec sr = {};
+	ib_sa_comp_mask mask = 0;
+	struct cma_work *work;
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	cma_id_get(id_priv);
+
+	work->id = id_priv;
+	INIT_WORK(&work->work, cma_work_handler);
+	work->old_state = RDMA_CM_ADDRINFO_QUERY;
+	work->new_state = RDMA_CM_ADDRINFO_RESOLVED;
+	work->event.event = RDMA_CM_EVENT_ADDRINFO_RESOLVED;
+
+	if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_ID) {
+		sr.id = cpu_to_be64(ibs->service_id);
+		mask |= IB_SA_SERVICE_REC_SERVICE_ID;
+	}
+	if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_NAME) {
+		strscpy(sr.name, ibs->service_name, sizeof(sr.name));
+		mask |= IB_SA_SERVICE_REC_SERVICE_NAME;
+	}
+
+	id_priv->query_id = ib_sa_service_rec_get(&sa_client,
+						  id_priv->id.device,
+						  id_priv->id.port_num,
+						  &sr, mask,
+						  2000, GFP_KERNEL,
+						  cma_query_ib_service_handler,
+						  work, &id_priv->query);
+
+	if (id_priv->query_id < 0) {
+		cma_id_put(id_priv);
+		kfree(work);
+		return id_priv->query_id;
+	}
+
+	return 0;
+}
+
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+			    struct rdma_ucm_ib_service *ibs)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (!id_priv->cma_dev ||
+	    !cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDRINFO_QUERY))
+		return -EINVAL;
+
+	if (rdma_cap_ib_sa(id->device, id->port_num))
+		ret = cma_resolve_ib_service(id_priv, ibs);
+	else
+		ret = -EOPNOTSUPP;
+
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_QUERY, RDMA_CM_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_ib_service);
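Based only on the signatures visible above, a kernel-side caller would be
expected to drive the new resolver roughly as follows; the cm_id must be
bound (RDMA_CM_ADDR_BOUND) and completion arrives asynchronously as an
RDMA_CM_EVENT_ADDRINFO_RESOLVED event. This is a hedged sketch, not code
from the merge::

  /* Sketch: resolve a service record by name on a bound cm_id. */
  static int example_resolve_service(struct rdma_cm_id *id)
  {
          struct rdma_ucm_ib_service ibs = {};

          ibs.flags = RDMA_USER_CM_IB_SERVICE_FLAG_NAME;
          strscpy(ibs.service_name, "example-service",
                  sizeof(ibs.service_name));

          return rdma_resolve_ib_service(id, &ibs);
  }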
@@ -47,7 +47,9 @@ enum rdma_cm_state {
 	RDMA_CM_ADDR_BOUND,
 	RDMA_CM_LISTEN,
 	RDMA_CM_DEVICE_REMOVAL,
-	RDMA_CM_DESTROYING
+	RDMA_CM_DESTROYING,
+	RDMA_CM_ADDRINFO_QUERY,
+	RDMA_CM_ADDRINFO_RESOLVED
 };

 struct rdma_id_private {
@@ -1543,7 +1543,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)

 	/*
 	 * We have a registration lock so that all the calls to unregister are
-	 * fully fenced, once any unregister returns the device is truely
+	 * fully fenced, once any unregister returns the device is truly
 	 * unregistered even if multiple callers are unregistering it at the
 	 * same time. This also interacts with the registration flow and
 	 * provides sane semantics if register and unregister are racing.
|
||||
struct ib_sa_query {
|
||||
void (*callback)(struct ib_sa_query *sa_query, int status,
|
||||
struct ib_sa_mad *mad);
|
||||
void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
|
||||
struct ib_mad_recv_wc *mad);
|
||||
void (*release)(struct ib_sa_query *);
|
||||
struct ib_sa_client *client;
|
||||
struct ib_sa_port *port;
|
||||
@@ -150,6 +152,13 @@ struct ib_sa_mcmember_query {
|
||||
struct ib_sa_query sa_query;
|
||||
};
|
||||
|
||||
struct ib_sa_service_query {
|
||||
void (*callback)(int status, struct sa_service_rec *rec,
|
||||
unsigned int num_services, void *context);
|
||||
void *context;
|
||||
struct ib_sa_query sa_query;
|
||||
};
|
||||
|
||||
static LIST_HEAD(ib_nl_request_list);
|
||||
static DEFINE_SPINLOCK(ib_nl_request_lock);
|
||||
static atomic_t ib_nl_sa_request_seq;
|
||||
@@ -684,6 +693,58 @@ static const struct ib_field guidinfo_rec_table[] = {
 	  .size_bits = 512 },
 };

+#define SERVICE_REC_FIELD(field) \
+	.struct_offset_bytes = offsetof(struct sa_service_rec, field),	\
+	.struct_size_bytes = sizeof_field(struct sa_service_rec, field),	\
+	.field_name = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+	{ SERVICE_REC_FIELD(id),
+	  .offset_words = 0,
+	  .offset_bits = 0,
+	  .size_bits = 64 },
+	{ SERVICE_REC_FIELD(gid),
+	  .offset_words = 2,
+	  .offset_bits = 0,
+	  .size_bits = 128 },
+	{ SERVICE_REC_FIELD(pkey),
+	  .offset_words = 6,
+	  .offset_bits = 0,
+	  .size_bits = 16 },
+	{ RESERVED,
+	  .offset_words = 6,
+	  .offset_bits = 16,
+	  .size_bits = 16 },
+	{ SERVICE_REC_FIELD(lease),
+	  .offset_words = 7,
+	  .offset_bits = 0,
+	  .size_bits = 32 },
+	{ SERVICE_REC_FIELD(key),
+	  .offset_words = 8,
+	  .offset_bits = 0,
+	  .size_bits = 128 },
+	{ SERVICE_REC_FIELD(name),
+	  .offset_words = 12,
+	  .offset_bits = 0,
+	  .size_bits = 512 },
+	{ SERVICE_REC_FIELD(data_8),
+	  .offset_words = 28,
+	  .offset_bits = 0,
+	  .size_bits = 128 },
+	{ SERVICE_REC_FIELD(data_16),
+	  .offset_words = 32,
+	  .offset_bits = 0,
+	  .size_bits = 128 },
+	{ SERVICE_REC_FIELD(data_32),
+	  .offset_words = 36,
+	  .offset_bits = 0,
+	  .size_bits = 128 },
+	{ SERVICE_REC_FIELD(data_64),
+	  .offset_words = 40,
+	  .offset_bits = 0,
+	  .size_bits = 128 },
+};
+
 #define RDMA_PRIMARY_PATH_MAX_REC_NUM 3

 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
@@ -1013,6 +1074,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
 		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

+	spin_lock_irqsave(&ib_nl_request_lock, flags);
+
 	delta = timeout - sa_local_svc_timeout_ms;
 	if (delta < 0)
 		abs_delta = -delta;
@@ -1020,7 +1083,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 		abs_delta = delta;

 	if (delta != 0) {
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
 		sa_local_svc_timeout_ms = timeout;
 		list_for_each_entry(query, &ib_nl_request_list, list) {
 			if (delta < 0 && abs_delta > query->timeout)
@@ -1038,9 +1100,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 		if (delay)
 			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
 					 (unsigned long)delay);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 	}

+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
 settimeout_out:
 	return 0;
 }
@@ -1390,6 +1453,20 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
 }
 EXPORT_SYMBOL(ib_sa_pack_path);

+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
+{
+	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
+		attribute);
+}
+EXPORT_SYMBOL(ib_sa_pack_service);
+
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
+{
+	ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
+		  rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_service);
+
 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
@@ -1479,6 +1556,68 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 	}
 }

+#define IB_SA_DATA_OFFS 56
+#define IB_SERVICE_REC_SZ 176
+
+static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
+				   struct ib_mad_recv_wc *mad_wc,
+				   int num_services)
+{
+	unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
+	struct ib_mad_recv_buf *mad_buf;
+	u8 buf[IB_SERVICE_REC_SZ];
+	u8 *data;
+
+	data_size = sizeof(((struct ib_sa_mad *) mad_buf->mad)->data);
+
+	list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
+		data = ((struct ib_sa_mad *) mad_buf->mad)->data;
+		data_i = 0;
+		while (data_i < data_size && rec_i < num_services) {
+			cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
+				    data_size - data_i);
+			memcpy(buf + buf_i, data + data_i, cp_sz);
+			data_i += cp_sz;
+			buf_i += cp_sz;
+			if (buf_i == IB_SERVICE_REC_SZ) {
+				ib_sa_unpack_service(buf, rec + rec_i);
+				buf_i = 0;
+				rec_i++;
+			}
+		}
+	}
+}
+
+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
+				       struct ib_mad_recv_wc *mad_wc)
+{
+	struct ib_sa_service_query *query =
+		container_of(sa_query, struct ib_sa_service_query, sa_query);
+	struct sa_service_rec *rec;
+	int num_services;
+
+	if (!mad_wc || !mad_wc->recv_buf.mad) {
+		query->callback(status, NULL, 0, query->context);
+		return;
+	}
+
+	num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
+	if (!num_services) {
+		query->callback(-ENODATA, NULL, 0, query->context);
+		return;
+	}
+
+	rec = kmalloc_array(num_services, sizeof(*rec), GFP_KERNEL);
+	if (!rec) {
+		query->callback(-ENOMEM, NULL, 0, query->context);
+		return;
+	}
+
+	ib_unpack_service_rmpp(rec, mad_wc, num_services);
+	query->callback(status, rec, num_services, query->context);
+	kfree(rec);
+}
+
 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 {
 	struct ib_sa_path_query *query =
@@ -1488,6 +1627,14 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 	kfree(query);
 }

+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+	struct ib_sa_service_query *query =
+		container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+	kfree(query);
+}
+
 /**
  * ib_sa_path_rec_get - Start a Path get query
  * @client:SA client
@@ -1618,6 +1765,101 @@ err1:
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);

+/**
+ * ib_sa_service_rec_get - Start a Service get query
+ * @client: SA client
+ * @device: device to send query on
+ * @port_num: port number to send query on
+ * @rec: Service Record to send in query
+ * @comp_mask: component mask to send in query
+ * @timeout_ms: time to wait for response
+ * @gfp_mask: GFP mask to use for internal allocations
+ * @callback: function called when query completes, times out or is
+ * canceled
+ * @context: opaque user context passed to callback
+ * @sa_query: query context, used to cancel query
+ *
+ * Send a Service Record Get query to the SA to look up a path. The
+ * callback function will be called when the query completes (or
+ * fails); status is 0 for a successful response, -EINTR if the query
+ * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
+ * occurred sending the query. The resp parameter of the callback is
+ * only valid if status is 0.
+ *
+ * If the return value of ib_sa_service_rec_get() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+			  struct ib_device *device, u32 port_num,
+			  struct sa_service_rec *rec,
+			  ib_sa_comp_mask comp_mask,
+			  unsigned long timeout_ms, gfp_t gfp_mask,
+			  void (*callback)(int status,
+					   struct sa_service_rec *resp,
+					   unsigned int num_services,
+					   void *context),
+			  void *context, struct ib_sa_query **sa_query)
+{
+	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+	struct ib_sa_service_query *query;
+	struct ib_mad_agent *agent;
+	struct ib_sa_port *port;
+	struct ib_sa_mad *mad;
+	int ret;
+
+	if (!sa_dev)
+		return -ENODEV;
+
+	port = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
+	query = kzalloc(sizeof(*query), gfp_mask);
+	if (!query)
+		return -ENOMEM;
+
+	query->sa_query.port = port;
+
+	ret = alloc_mad(&query->sa_query, gfp_mask);
+	if (ret)
+		goto err1;
+
+	ib_sa_client_get(client);
+	query->sa_query.client = client;
+	query->callback = callback;
+	query->context = context;
+
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(&query->sa_query, agent);
+
+	query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
+						   NULL;
+	query->sa_query.release = ib_sa_service_rec_release;
+	mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
+	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+	mad->sa_hdr.comp_mask = comp_mask;
+
+	ib_sa_pack_service(rec, mad->data);
+
+	*sa_query = &query->sa_query;
+	query->sa_query.mad_buf->context[1] = rec;
+
+	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+	if (ret < 0)
+		goto err2;
+
+	return ret;
+
+err2:
+	*sa_query = NULL;
+	ib_sa_client_put(query->sa_query.client);
+	free_mad(&query->sa_query);
+err1:
+	kfree(query);
+	return ret;
+}
+EXPORT_SYMBOL(ib_sa_service_rec_get);
+
 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
 {
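To illustrate the new SA entry point, here is a sketch of a client issuing
a service record query by service ID. The comp_mask value and callback
signature come from the code above; the client is assumed to have been
registered with ib_sa_register_client(), and the service ID is illustrative::

  /* Sketch: asynchronous service record lookup via the SA. */
  static void example_svc_cb(int status, struct sa_service_rec *resp,
                             unsigned int num_services, void *context)
  {
          if (!status)
                  pr_info("got %u service record(s)\n", num_services);
  }

  static int example_svc_query(struct ib_sa_client *client,
                               struct ib_device *dev, u32 port,
                               struct ib_sa_query **query)
  {
          struct sa_service_rec rec = {
                  .id = cpu_to_be64(0x1234ULL),   /* illustrative ID */
          };

          return ib_sa_service_rec_get(client, dev, port, &rec,
                                       IB_SA_SERVICE_REC_SERVICE_ID,
                                       2000, GFP_KERNEL,
                                       example_svc_cb, NULL, query);
  }

A negative return is an error; otherwise the value is a query ID that can
be cancelled with ib_sa_cancel_query().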
@@ -1987,23 +2229,29 @@ static void send_handler(struct ib_mad_agent *agent,
 {
 	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
 	unsigned long flags;
+	int status = 0;

-	if (query->callback)
+	if (query->callback || query->rmpp_callback) {
 		switch (mad_send_wc->status) {
 		case IB_WC_SUCCESS:
 			/* No callback -- already got recv */
 			break;
 		case IB_WC_RESP_TIMEOUT_ERR:
-			query->callback(query, -ETIMEDOUT, NULL);
+			status = -ETIMEDOUT;
 			break;
 		case IB_WC_WR_FLUSH_ERR:
-			query->callback(query, -EINTR, NULL);
+			status = -EINTR;
 			break;
 		default:
-			query->callback(query, -EIO, NULL);
+			status = -EIO;
 			break;
 		}

+		if (status)
+			query->callback ? query->callback(query, status, NULL) :
+					  query->rmpp_callback(query, status, NULL);
+	}
+
 	xa_lock_irqsave(&queries, flags);
 	__xa_erase(&queries, query->id);
 	xa_unlock_irqrestore(&queries, flags);
@@ -2019,17 +2267,25 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
 {
 	struct ib_sa_query *query;
+	struct ib_mad *mad;

 	if (!send_buf)
 		return;

 	query = send_buf->context[0];
-	if (query->callback) {
+	mad = mad_recv_wc->recv_buf.mad;
+
+	if (query->rmpp_callback) {
 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
-			query->callback(query,
-					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
-					-EINVAL : 0,
-					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
+			query->rmpp_callback(query, mad->mad_hdr.status ?
+					     -EINVAL : 0, mad_recv_wc);
+		else
+			query->rmpp_callback(query, -EIO, NULL);
+	} else if (query->callback) {
+		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
+			query->callback(query, mad->mad_hdr.status ?
+					-EINVAL : 0, (struct ib_sa_mad *)mad);
 		else
 			query->callback(query, -EIO, NULL);
 	}
@@ -2181,8 +2437,9 @@ static int ib_sa_add_one(struct ib_device *device)

 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
-					      NULL, 0, send_handler,
-					      recv_handler, sa_dev, 0);
+					      NULL, IB_MGMT_RMPP_VERSION,
+					      send_handler, recv_handler,
+					      sa_dev, 0);
 		if (IS_ERR(sa_dev->port[i].agent)) {
 			ret = PTR_ERR(sa_dev->port[i].agent);
 			goto err;
@@ -282,6 +282,10 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
 	}
 	uevent->resp.event = event->event;
 	uevent->resp.status = event->status;
+
+	if (event->event == RDMA_CM_EVENT_ADDRINFO_RESOLVED)
+		goto out;
+
 	if (ctx->cm_id->qp_type == IB_QPT_UD)
 		ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
 				   &event->param.ud);
@@ -289,6 +293,7 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
 		ucma_copy_conn_event(&uevent->resp.param.conn,
 				     &event->param.conn);

+out:
 	uevent->resp.ece.vendor_id = event->ece.vendor_id;
 	uevent->resp.ece.attr_mod = event->ece.attr_mod;
 	return uevent;
@@ -728,6 +733,28 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
 	return ret;
 }

+static ssize_t ucma_resolve_ib_service(struct ucma_file *file,
+				       const char __user *inbuf, int in_len,
+				       int out_len)
+{
+	struct rdma_ucm_resolve_ib_service cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	mutex_lock(&ctx->mutex);
+	ret = rdma_resolve_ib_service(ctx->cm_id, &cmd.ibs);
+	mutex_unlock(&ctx->mutex);
+
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
 static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
@@ -994,6 +1021,43 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
 	return ret;
 }

+static ssize_t ucma_query_ib_service(struct ucma_context *ctx,
+				     void __user *response, int out_len)
+{
+	struct rdma_ucm_query_ib_service_resp *resp;
+	int n, ret = 0;
+
+	if (out_len < sizeof(struct rdma_ucm_query_ib_service_resp))
+		return -ENOSPC;
+
+	if (!ctx->cm_id->route.service_recs)
+		return -ENODATA;
+
+	resp = kzalloc(out_len, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	resp->num_service_recs = ctx->cm_id->route.num_service_recs;
+
+	n = (out_len - sizeof(struct rdma_ucm_query_ib_service_resp)) /
+	    sizeof(struct ib_user_service_rec);
+
+	if (!n)
+		goto out;
+
+	if (n > ctx->cm_id->route.num_service_recs)
+		n = ctx->cm_id->route.num_service_recs;
+
+	memcpy(resp->recs, ctx->cm_id->route.service_recs,
+	       sizeof(*resp->recs) * n);
+	if (copy_to_user(response, resp, struct_size(resp, recs, n)))
+		ret = -EFAULT;
+
+out:
+	kfree(resp);
+	return ret;
+}
+
 static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
@@ -1022,6 +1086,9 @@ static ssize_t ucma_query(struct ucma_file *file,
 	case RDMA_USER_CM_QUERY_GID:
 		ret = ucma_query_gid(ctx, response, out_len);
 		break;
+	case RDMA_USER_CM_QUERY_IB_SERVICE:
+		ret = ucma_query_ib_service(ctx, response, out_len);
+		break;
 	default:
 		ret = -ENOSYS;
 		break;
@@ -1678,6 +1745,55 @@ err_unlock:
 	return ret;
 }

+static ssize_t ucma_write_cm_event(struct ucma_file *file,
+				   const char __user *inbuf, int in_len,
+				   int out_len)
+{
+	struct rdma_ucm_write_cm_event cmd;
+	struct rdma_cm_event event = {};
+	struct ucma_event *uevent;
+	struct ucma_context *ctx;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	if ((cmd.event != RDMA_CM_EVENT_USER) &&
+	    (cmd.event != RDMA_CM_EVENT_INTERNAL))
+		return -EINVAL;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	event.event = cmd.event;
+	event.status = cmd.status;
+	event.param.arg = cmd.param.arg;
+
+	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+	if (!uevent) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	uevent->ctx = ctx;
+	uevent->resp.uid = ctx->uid;
+	uevent->resp.id = ctx->id;
+	uevent->resp.event = event.event;
+	uevent->resp.status = event.status;
+	memcpy(uevent->resp.param.arg32, &event.param.arg,
+	       sizeof(event.param.arg));
+
+	mutex_lock(&ctx->file->mut);
+	list_add_tail(&uevent->list, &ctx->file->event_list);
+	mutex_unlock(&ctx->file->mut);
+	wake_up_interruptible(&ctx->file->poll_wait);
+
+out:
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len) = {
@@ -1703,7 +1819,9 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
 	[RDMA_USER_CM_CMD_QUERY] = ucma_query,
 	[RDMA_USER_CM_CMD_BIND] = ucma_bind,
 	[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
-	[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
+	[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
+	[RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service,
+	[RDMA_USER_CM_CMD_WRITE_CM_EVENT] = ucma_write_cm_event,
 };

 static ssize_t ucma_write(struct file *filp, const char __user *buf,
@@ -14,3 +14,4 @@ obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
 obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
 obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
 obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic/
@@ -172,9 +172,9 @@ struct bnxt_re_dev {
 	struct list_head list;
 	unsigned long flags;
 #define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_STATS_CTX3_ALLOC 1
 #define BNXT_RE_FLAG_HAVE_L2_REF 3
 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
 #define BNXT_RE_FLAG_QOS_WORK_REG 5
 #define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
 #define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
 #define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
@@ -187,9 +187,6 @@ struct bnxt_re_dev {

 	int id;

-	struct delayed_work worker;
-	u8 cur_prio_map;
-
 	/* RCFW Channel */
 	struct bnxt_qplib_rcfw rcfw;

@@ -227,6 +224,13 @@ struct bnxt_re_dev {
 	struct workqueue_struct *dcb_wq;
 	struct dentry *cc_config;
 	struct bnxt_re_dbg_cc_config_params *cc_config_params;
+#define BNXT_VPD_FLD_LEN 32
+	char board_partno[BNXT_VPD_FLD_LEN];
+	/* RoCE mirror */
+	u16 mirror_vnic_id;
+	union ib_gid ugid;
+	u32 ugid_index;
+	u8 sniffer_flow_created : 1;
 };

 #define to_bnxt_re_dev(ptr, member)	\
@@ -243,6 +247,10 @@ int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad);
 int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
					  struct ib_mad *out_mad);

+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id);
+
 static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
 {
 	if (rdev)
@@ -276,4 +284,7 @@ static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
 #define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7	192
 #define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7	192

+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
+	((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
 #endif
@@ -8,6 +8,7 @@

 #include <linux/debugfs.h>
 #include <linux/pci.h>
+#include <linux/seq_file.h>
 #include <rdma/ib_addr.h>

 #include "bnxt_ulp.h"
@@ -314,6 +315,40 @@ static const struct file_operations bnxt_re_cc_config_ops = {
 	.write = bnxt_re_cc_config_set,
 };

+static int info_show(struct seq_file *m, void *unused)
+{
+	struct bnxt_re_dev *rdev = m->private;
+	struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
+
+	seq_puts(m, "Info:\n");
+	seq_printf(m, "Device Name\t\t: %s\n", dev_name(&rdev->ibdev.dev));
+	seq_printf(m, "PD Watermark\t\t: %llu\n", res_s->pd_watermark);
+	seq_printf(m, "AH Watermark\t\t: %llu\n", res_s->ah_watermark);
+	seq_printf(m, "QP Watermark\t\t: %llu\n", res_s->qp_watermark);
+	seq_printf(m, "RC QP Watermark\t\t: %llu\n", res_s->rc_qp_watermark);
+	seq_printf(m, "UD QP Watermark\t\t: %llu\n", res_s->ud_qp_watermark);
+	seq_printf(m, "SRQ Watermark\t\t: %llu\n", res_s->srq_watermark);
+	seq_printf(m, "CQ Watermark\t\t: %llu\n", res_s->cq_watermark);
+	seq_printf(m, "MR Watermark\t\t: %llu\n", res_s->mr_watermark);
+	seq_printf(m, "MW Watermark\t\t: %llu\n", res_s->mw_watermark);
+	seq_printf(m, "CQ Resize Count\t\t: %d\n", atomic_read(&res_s->resize_count));
+	if (rdev->pacing.dbr_pacing) {
+		seq_printf(m, "DB Pacing Reschedule\t: %llu\n", rdev->stats.pacing.resched);
+		seq_printf(m, "DB Pacing Complete\t: %llu\n", rdev->stats.pacing.complete);
+		seq_printf(m, "DB Pacing Alerts\t: %llu\n", rdev->stats.pacing.alerts);
+		seq_printf(m, "DB FIFO Register\t: 0x%x\n",
+			   readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off));
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(info);
+
+static void bnxt_re_debugfs_add_info(struct bnxt_re_dev *rdev)
+{
+	debugfs_create_file("info", 0400, rdev->dbg_root, rdev, &info_fops);
+}
+
 void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
 {
 	struct pci_dev *pdev = rdev->en_dev->pdev;
@@ -325,6 +360,8 @@ void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
 	rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
 	rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);

+	bnxt_re_debugfs_add_info(rdev);
+
 	rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);

 	for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
@@ -51,25 +51,6 @@
 #include "hw_counters.h"

 static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
-	[BNXT_RE_ACTIVE_PD].name = "active_pds",
-	[BNXT_RE_ACTIVE_AH].name = "active_ahs",
-	[BNXT_RE_ACTIVE_QP].name = "active_qps",
-	[BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
-	[BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
-	[BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
-	[BNXT_RE_ACTIVE_CQ].name = "active_cqs",
-	[BNXT_RE_ACTIVE_MR].name = "active_mrs",
-	[BNXT_RE_ACTIVE_MW].name = "active_mws",
-	[BNXT_RE_WATERMARK_PD].name = "watermark_pds",
-	[BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
-	[BNXT_RE_WATERMARK_QP].name = "watermark_qps",
-	[BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
-	[BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
-	[BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
-	[BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
-	[BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
-	[BNXT_RE_WATERMARK_MW].name = "watermark_mws",
-	[BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
 	[BNXT_RE_RX_PKTS].name = "rx_pkts",
 	[BNXT_RE_RX_BYTES].name = "rx_bytes",
 	[BNXT_RE_TX_PKTS].name = "tx_pkts",
@@ -79,22 +60,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
 	[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
 	[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
 	[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
-	[BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
-	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
-	[BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
-	[BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
-	[BNXT_RE_MISSING_RESP].name = "missing_resp",
+	[BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
+	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
+	[BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+	[BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
+	[BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
 	[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
 	[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
 	[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
 	[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
 	[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
-	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
-	[BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
+	[BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
 	[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
-	[BNXT_RE_DUP_REQ].name = "dup_req",
+	[BNXT_RE_DUP_REQ].name = "duplicate_request",
 	[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
-	[BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+	[BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
 	[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
 	[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
 	[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
@@ -118,7 +99,7 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
 	[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
 	[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
 	[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
-	[BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+	[BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
 	[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
 	[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
 	[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
@@ -126,23 +107,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
 	[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
 	[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
 	[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
-	[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
-	[BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+	[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
+	[BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
 	[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
-	[BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+	[BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
 	[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
 	[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
 	[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
 	[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
 	[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
-	[BNXT_RE_OOB].name = "rx_out_of_buffer",
-	[BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
-	[BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
-	[BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
-	[BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
-	[BNXT_RE_PACING_CMPL].name = "pacing_complete",
-	[BNXT_RE_PACING_ALERT].name = "pacing_alerts",
-	[BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
+	[BNXT_RE_OOB].name = "out_of_buffer",
+	[BNXT_RE_TX_CNP].name = "np_cnp_pkts",
+	[BNXT_RE_RX_CNP].name = "rp_cnp_handled",
+	[BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
+	[BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
+	[BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
+	[BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
 };

 static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -273,18 +253,20 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
 		err_s->res_rx_pci_err;
 	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
 		err_s->res_oos_drop_count;
-}
-
-static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
-					 struct rdma_hw_stats *stats)
-{
-	struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
-
-	stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
-	stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
-	stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
-	stats->value[BNXT_RE_DB_FIFO_REG] =
-		readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+	stats->value[BNXT_RE_REQ_CQE_ERROR] =
+		err_s->bad_resp_err +
+		err_s->local_qp_op_err +
+		err_s->local_protection_err +
+		err_s->mem_mgmt_op_err +
+		err_s->remote_invalid_req_err +
+		err_s->remote_access_err +
+		err_s->remote_op_err;
+	stats->value[BNXT_RE_RESP_CQE_ERROR] =
+		err_s->res_cmp_err +
+		err_s->res_cq_load_err;
+	stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
+		err_s->res_rx_no_perm +
+		err_s->res_tx_no_perm;
 }

 int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
@@ -382,7 +364,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
			    u32 port, int index)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-	struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
 	struct bnxt_qplib_roce_stats *err_s = NULL;
 	struct ctx_hw_stats *hw_stats = NULL;
 	int rc = 0;

@@ -391,26 +372,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 	if (!port || !stats)
 		return -EINVAL;

-	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
-	stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
-	stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
-	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
-	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
-	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
-	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
-	stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
-	stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
-	stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
-	stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
-	stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
-	stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
-	stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
-	stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
-	stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
-	stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
-	stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
-	stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
-
 	if (hw_stats) {
 		stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
			le64_to_cpu(hw_stats->tx_bcast_pkts);
@@ -449,8 +410,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
			goto done;
		}
	}
-	if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
-		bnxt_re_copy_db_pacing_stats(rdev, stats);
 }

 done:
@@ -41,25 +41,6 @@
 #define __BNXT_RE_HW_STATS_H__

 enum bnxt_re_hw_stats {
-	BNXT_RE_ACTIVE_PD,
-	BNXT_RE_ACTIVE_AH,
-	BNXT_RE_ACTIVE_QP,
-	BNXT_RE_ACTIVE_RC_QP,
-	BNXT_RE_ACTIVE_UD_QP,
-	BNXT_RE_ACTIVE_SRQ,
-	BNXT_RE_ACTIVE_CQ,
-	BNXT_RE_ACTIVE_MR,
-	BNXT_RE_ACTIVE_MW,
-	BNXT_RE_WATERMARK_PD,
-	BNXT_RE_WATERMARK_AH,
-	BNXT_RE_WATERMARK_QP,
-	BNXT_RE_WATERMARK_RC_QP,
-	BNXT_RE_WATERMARK_UD_QP,
-	BNXT_RE_WATERMARK_SRQ,
-	BNXT_RE_WATERMARK_CQ,
-	BNXT_RE_WATERMARK_MR,
-	BNXT_RE_WATERMARK_MW,
-	BNXT_RE_RESIZE_CQ_CNT,
 	BNXT_RE_RX_PKTS,
 	BNXT_RE_RX_BYTES,
 	BNXT_RE_TX_PKTS,
@@ -129,10 +110,9 @@ enum bnxt_re_hw_stats {
 	BNXT_RE_TX_CNP,
 	BNXT_RE_RX_CNP,
 	BNXT_RE_RX_ECN,
-	BNXT_RE_PACING_RESCHED,
-	BNXT_RE_PACING_CMPL,
-	BNXT_RE_PACING_ALERT,
-	BNXT_RE_DB_FIFO_REG,
+	BNXT_RE_REQ_CQE_ERROR,
+	BNXT_RE_RESP_CQE_ERROR,
+	BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
 	BNXT_RE_NUM_EXT_COUNTERS
 };
@@ -288,7 +288,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
 	}
 	port_attr->max_mtu = IB_MTU_4096;
 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
-	port_attr->gid_tbl_len = dev_attr->max_sgid;
+	/* One GID is reserved for RawEth QP. Report one less */
+	port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+				  dev_attr->max_sgid);
 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
@@ -375,7 +377,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
 	if (!ctx)
 		return -EINVAL;

-	if (sgid_tbl && sgid_tbl->active) {
+	if (sgid_tbl->active) {
 		if (ctx->idx >= sgid_tbl->max)
 			return -EINVAL;
 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
@@ -429,7 +431,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)

 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
-				 vlan_id, true, &tbl_idx);
+				 vlan_id, true, &tbl_idx, false, 0);
 	if (rc == -EALREADY) {
 		ctx_tbl = sgid_tbl->ctx;
 		ctx_tbl[tbl_idx]->refcnt++;
@@ -955,6 +957,20 @@ fail:
 	return rc;
 }

+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return;
+
+	rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+				 (struct bnxt_qplib_gid *)&rdev->ugid,
+				 0xFFFF, true);
+	if (rc)
+		dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
+}
+
 /* Queue Pairs */
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
@@ -994,6 +1010,9 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
 		atomic_dec(&rdev->stats.res.ud_qp_count);

+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+		bnxt_re_del_unique_gid(rdev);
+
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);

@@ -1018,6 +1037,8 @@ static u8 __from_ib_qp_type(enum ib_qp_type type)
 		return CMDQ_CREATE_QP_TYPE_RC;
 	case IB_QPT_UD:
 		return CMDQ_CREATE_QP_TYPE_UD;
+	case IB_QPT_RAW_PACKET:
+		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
 	default:
 		return IB_QPT_MAX;
 	}
@@ -1595,6 +1616,29 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
 	return rc;
 }

+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return 0;
+
+	rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+	addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+	rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+				 (struct bnxt_qplib_gid *)&rdev->ugid,
+				 rdev->qplib_res.netdev->dev_addr,
+				 0xFFFF, true, &rdev->ugid_index, true,
+				 hctx->stats3.fw_id);
+	if (rc)
+		dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+	return rc;
+}
+
 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata)
 {
@@ -1656,6 +1700,17 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		}
 	}

+	/* Support for RawEth QP is added to capture TCP pkt dump.
+	 * So unique SGID is used to avoid incorrect statistics on per
+	 * function stats_ctx
+	 */
+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+		rc = bnxt_re_add_unique_gid(rdev);
+		if (rc)
+			goto qp_destroy;
+		qp->qplib_qp.ugid_index = rdev->ugid_index;
+	}
+
 	qp->ib_qp.qp_num = qp->qplib_qp.id;
 	if (qp_init_attr->qp_type == IB_QPT_GSI)
 		rdev->gsi_ctx.gsi_qp = qp;
@@ -2301,7 +2356,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	qp_attr->pkey_index = qplib_qp->pkey_index;
 	qp_attr->qkey = qplib_qp->qkey;
 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
-	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
@@ -3248,9 +3303,9 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
					      IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(cq->resize_umem)) {
 		rc = PTR_ERR(cq->resize_umem);
+		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+			  __func__, cq->resize_umem);
 		cq->resize_umem = NULL;
-		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
-			  __func__, rc);
 		goto fail;
 	}
 	cq->resize_cqe = entries;
@@ -4392,6 +4447,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
|
||||
}
|
||||
}
|
||||
|
||||
static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = bnxt_re_hwrm_alloc_vnic(rdev);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
|
||||
if (rc)
|
||||
goto out_free_vnic;
|
||||
|
||||
return 0;
|
||||
out_free_vnic:
|
||||
bnxt_re_hwrm_free_vnic(rdev);
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
|
||||
struct ib_flow_attr *attr,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
|
||||
struct bnxt_re_dev *rdev = qp->rdev;
|
||||
struct bnxt_re_flow *flow;
|
||||
int rc;
|
||||
|
||||
if (attr->type != IB_FLOW_ATTR_SNIFFER ||
|
||||
!rdev->rcfw.roce_mirror)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
mutex_lock(&rdev->qp_lock);
|
||||
if (rdev->sniffer_flow_created) {
|
||||
ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
|
||||
mutex_unlock(&rdev->qp_lock);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
|
||||
if (!flow) {
|
||||
mutex_unlock(&rdev->qp_lock);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
flow->rdev = rdev;
|
||||
|
||||
rc = bnxt_re_setup_vnic(rdev, qp);
|
||||
if (rc)
|
||||
goto out_free_flow;
|
||||
|
||||
rc = bnxt_qplib_create_flow(&rdev->qplib_res);
|
||||
if (rc)
|
||||
goto out_free_vnic;
|
||||
|
||||
rdev->sniffer_flow_created = 1;
|
||||
mutex_unlock(&rdev->qp_lock);
|
||||
|
||||
return &flow->ib_flow;
|
||||
|
||||
out_free_vnic:
|
||||
bnxt_re_hwrm_free_vnic(rdev);
|
||||
out_free_flow:
|
||||
mutex_unlock(&rdev->qp_lock);
|
||||
kfree(flow);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
int bnxt_re_destroy_flow(struct ib_flow *flow_id)
|
||||
{
|
||||
struct bnxt_re_flow *flow =
|
||||
container_of(flow_id, struct bnxt_re_flow, ib_flow);
|
||||
struct bnxt_re_dev *rdev = flow->rdev;
|
||||
int rc;
|
||||
|
||||
mutex_lock(&rdev->qp_lock);
|
||||
rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
|
||||
if (rc)
|
||||
ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
|
||||
rdev->sniffer_flow_created = 0;
|
||||
|
||||
bnxt_re_hwrm_free_vnic(rdev);
|
||||
mutex_unlock(&rdev->qp_lock);
|
||||
kfree(flow);
|
||||
|
||||
return rc;
|
||||
}
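
Since bnxt_re_create_flow() accepts only IB_FLOW_ATTR_SNIFFER and a single active flow, the intended consumer is a user-space capture tool attaching a sniffer flow to a raw-packet QP. A hedged libibverbs sketch (QP and CQ setup elided; the helper name is ours, not from the patch):

    #include <infiniband/verbs.h>

    /* Attach a sniffer flow so the mirror VNIC feeds port traffic to
     * the given raw-packet QP; returns NULL (errno set) on failure. */
    static struct ibv_flow *attach_sniffer(struct ibv_qp *raw_qp)
    {
        struct ibv_flow_attr attr = {
            .type = IBV_FLOW_ATTR_SNIFFER, /* mirror all port traffic */
            .size = sizeof(attr),          /* no extra flow specs */
            .port = 1,
        };

        return ibv_create_flow(raw_qp, &attr);
    }

Tearing the capture down goes through ibv_destroy_flow(), which reaches bnxt_re_destroy_flow() above and frees the mirror VNIC.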
 
 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
 {
     struct bnxt_re_cq *cq = NULL, *tmp_cq;
@@ -4604,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
         return err;
 
     err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
-                         &dpi, sizeof(length));
+                         &dpi, sizeof(dpi));
     if (err)
         return err;
 
@@ -164,6 +164,11 @@ struct bnxt_re_user_mmap_entry {
     u8 mmap_flag;
 };
 
+struct bnxt_re_flow {
+    struct ib_flow ib_flow;
+    struct bnxt_re_dev *rdev;
+};
+
 static inline u16 bnxt_re_get_swqe_size(int nsge)
 {
     return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
@@ -267,6 +272,11 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
                                          struct uverbs_attr_bundle *attrs);
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+                                    struct ib_flow_attr *attr,
+                                    struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
 
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 
@@ -80,6 +80,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 static DEFINE_MUTEX(bnxt_re_mutex);
 
 static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
 
 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
                              u32 *offset);
@@ -188,6 +189,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
     rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
     rdev->qplib_res.en_dev = en_dev;
 
+    rc = bnxt_re_query_hwrm_intf_version(rdev);
+    if (rc)
+        goto free_dev_attr;
+
     bnxt_re_set_drv_mode(rdev);
 
     bnxt_re_set_db_offset(rdev);
@@ -540,6 +545,72 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
     fw_msg->timeout = timeout;
 }
 
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev)
+{
+    struct bnxt_en_dev *en_dev = rdev->en_dev;
+    struct hwrm_vnic_free_input req = {};
+    struct bnxt_fw_msg fw_msg = {};
+    int rc;
+
+    bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE);
+
+    req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id);
+    bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+                        0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+    rc = bnxt_send_msg(en_dev, &fw_msg);
+    if (rc)
+        ibdev_dbg(&rdev->ibdev,
+                  "Failed to free vnic, rc = %d\n", rc);
+}
+
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev)
+{
+    struct bnxt_en_dev *en_dev = rdev->en_dev;
+    struct hwrm_vnic_alloc_output resp = {};
+    struct hwrm_vnic_alloc_input req = {};
+    struct bnxt_fw_msg fw_msg = {};
+    int rc;
+
+    bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC);
+
+    req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+    req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID);
+    bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+    rc = bnxt_send_msg(en_dev, &fw_msg);
+    if (rc)
+        ibdev_dbg(&rdev->ibdev,
+                  "Failed to alloc vnic, rc = %d\n", rc);
+
+    return rc;
+}
+
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
+{
+    struct bnxt_en_dev *en_dev = rdev->en_dev;
+    struct hwrm_vnic_cfg_input req = {};
+    struct bnxt_fw_msg fw_msg = {};
+    int rc;
+
+    bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
+
+    req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
+    req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
                              VNIC_CFG_REQ_ENABLES_MRU);
+    req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+    req.raw_qp_id = cpu_to_le32(qp_id);
+    req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
+
+    bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+                        0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+    rc = bnxt_send_msg(en_dev, &fw_msg);
+    if (rc)
+        ibdev_dbg(&rdev->ibdev,
+                  "Failed to cfg vnic, rc = %d\n", rc);
+
+    return rc;
+}
 
 /* Query device config using common hwrm */
 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
                              u32 *offset)
@@ -553,11 +624,12 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
     bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
     req.fid = cpu_to_le16(0xffff);
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (!rc) {
         *db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
         *offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+        rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id);
     }
     return rc;
 }
@@ -577,7 +649,7 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
     bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
     req.fid = cpu_to_le16(0xffff);
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (rc)
@@ -587,6 +659,8 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
     flags_ext2 = le32_to_cpu(resp.flags_ext2);
     cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
                              flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
+    cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) &
+                                 FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED);
     return 0;
 }
 
@@ -603,7 +677,7 @@ static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
     cctx = rdev->chip_ctx;
     bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (rc)
         return rc;
@@ -842,20 +916,12 @@ static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
                                  u16 fw_ring_id, int type)
 {
-    struct bnxt_en_dev *en_dev;
+    struct bnxt_en_dev *en_dev = rdev->en_dev;
     struct hwrm_ring_free_input req = {};
     struct hwrm_ring_free_output resp;
     struct bnxt_fw_msg fw_msg = {};
     int rc = -EINVAL;
 
-    if (!rdev)
-        return rc;
-
-    en_dev = rdev->en_dev;
-
-    if (!en_dev)
-        return rc;
-
     if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
         return 0;
 
@@ -863,7 +929,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
     req.ring_type = type;
     req.ring_id = cpu_to_le16(fw_ring_id);
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (rc)
         ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
@@ -881,9 +947,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
     struct bnxt_fw_msg fw_msg = {};
     int rc = -EINVAL;
 
-    if (!en_dev)
-        return rc;
-
     bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
     req.enables = 0;
     req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
@@ -899,7 +962,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
     req.ring_type = ring_attr->type;
     req.int_mode = ring_attr->mode;
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (!rc)
         *fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -916,16 +979,13 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
     struct bnxt_fw_msg fw_msg = {};
     int rc = -EINVAL;
 
-    if (!en_dev)
-        return rc;
-
     if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
         return 0;
 
     bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
     req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (rc)
         ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -935,8 +995,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
 }
 
 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
-                                       dma_addr_t dma_map,
-                                       u32 *fw_stats_ctx_id)
+                                       struct bnxt_qplib_stats *stats)
 {
     struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
     struct hwrm_stat_ctx_alloc_output resp = {};
@@ -945,21 +1004,18 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
     struct bnxt_fw_msg fw_msg = {};
     int rc = -EINVAL;
 
-    *fw_stats_ctx_id = INVALID_STATS_CTX_ID;
-
-    if (!en_dev)
-        return rc;
+    stats->fw_id = INVALID_STATS_CTX_ID;
 
     bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
     req.update_period_ms = cpu_to_le32(1000);
-    req.stats_dma_addr = cpu_to_le64(dma_map);
+    req.stats_dma_addr = cpu_to_le64(stats->dma_map);
     req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
     req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-                        sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+                        sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
     rc = bnxt_send_msg(en_dev, &fw_msg);
     if (!rc)
-        *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
+        stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
 
     return rc;
 }
@@ -975,7 +1031,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
     struct bnxt_re_dev *rdev =
         rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
 
-    return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
+    return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision);
 }
 static DEVICE_ATTR_RO(hw_rev);
 
@@ -985,13 +1041,31 @@ static ssize_t hca_type_show(struct device *device,
     struct bnxt_re_dev *rdev =
         rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
 
-    return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
+    return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device);
 }
 static DEVICE_ATTR_RO(hca_type);
 
+static ssize_t board_id_show(struct device *device, struct device_attribute *attr,
+                             char *buf)
+{
+    struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device,
+                                                         struct bnxt_re_dev, ibdev);
+    char buffer[BNXT_VPD_FLD_LEN] = {};
+
+    if (!rdev->is_virtfn)
+        memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1);
+    else
+        scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF",
+                  rdev->en_dev->pdev->device);
+
+    return sysfs_emit(buf, "%s\n", buffer);
+}
+static DEVICE_ATTR_RO(board_id);
+
 static struct attribute *bnxt_re_attributes[] = {
     &dev_attr_hw_rev.attr,
     &dev_attr_hca_type.attr,
+    &dev_attr_board_id.attr,
     NULL
 };
 
@@ -1207,6 +1281,8 @@ static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq
         goto err;
     if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
         goto err;
+    if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold))
+        goto err;
 
     nla_nest_end(msg, table_attr);
     return 0;
@@ -1297,6 +1373,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
     .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
     .req_notify_cq = bnxt_re_req_notify_cq,
     .resize_cq = bnxt_re_resize_cq,
+    .create_flow = bnxt_re_create_flow,
+    .destroy_flow = bnxt_re_destroy_flow,
     INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
     INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
     INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
@@ -1323,8 +1401,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 
     /* ib device init */
     ibdev->node_type = RDMA_NODE_IB_CA;
-    strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
-            strlen(BNXT_RE_DESC) + 5);
+    strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA");
     ibdev->phys_port_cnt = 1;
 
     addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
@@ -1850,81 +1927,6 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
     mutex_unlock(&rdev->qp_lock);
 }
 
-static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
-{
-    struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
-    struct bnxt_qplib_gid gid;
-    u16 gid_idx, index;
-    int rc = 0;
-
-    if (!ib_device_try_get(&rdev->ibdev))
-        return 0;
-
-    for (index = 0; index < sgid_tbl->active; index++) {
-        gid_idx = sgid_tbl->hw_id[index];
-
-        if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
-                    sizeof(bnxt_qplib_gid_zero)))
-            continue;
-        /* need to modify the VLAN enable setting of non VLAN GID only
-         * as setting is done for VLAN GID while adding GID
-         */
-        if (sgid_tbl->vlan[index])
-            continue;
-
-        memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
-
-        rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
-                                    rdev->qplib_res.netdev->dev_addr);
-    }
-
-    ib_device_put(&rdev->ibdev);
-    return rc;
-}
-
-static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
-{
-    u32 prio_map = 0, tmp_map = 0;
-    struct net_device *netdev;
-    struct dcb_app app = {};
-
-    netdev = rdev->netdev;
-
-    app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-    app.protocol = ETH_P_IBOE;
-    tmp_map = dcb_ieee_getapp_mask(netdev, &app);
-    prio_map = tmp_map;
-
-    app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
-    app.protocol = ROCE_V2_UDP_DPORT;
-    tmp_map = dcb_ieee_getapp_mask(netdev, &app);
-    prio_map |= tmp_map;
-
-    return prio_map;
-}
-
-static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
-{
-    u8 prio_map = 0;
-
-    /* Get priority for roce */
-    prio_map = bnxt_re_get_priority_mask(rdev);
-
-    if (prio_map == rdev->cur_prio_map)
-        return 0;
-    rdev->cur_prio_map = prio_map;
-    /* Actual priorities are not programmed as they are already
-     * done by L2 driver; just enable or disable priority vlan tagging
-     */
-    if ((prio_map == 0 && rdev->qplib_res.prio) ||
-        (prio_map != 0 && !rdev->qplib_res.prio)) {
-        rdev->qplib_res.prio = prio_map;
-        bnxt_re_update_gid(rdev);
-    }
-
-    return 0;
-}
-
 static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
 {
     if (rdev->is_virtfn)
@@ -1945,7 +1947,31 @@ static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
                  ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
 }
 
-static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev)
 {
+    struct pci_dev *pdev = rdev->en_dev->pdev;
+    unsigned int vpd_size, kw_len;
+    int pos, size;
+    u8 *vpd_data;
+
+    vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+    if (IS_ERR(vpd_data)) {
+        pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+        return;
+    }
+
+    pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+                                       PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+    if (pos < 0)
+        goto free;
+
+    size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+    memcpy(rdev->board_partno, &vpd_data[pos], size);
+free:
+    kfree(vpd_data);
+}
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+{
     struct bnxt_en_dev *en_dev = rdev->en_dev;
     struct hwrm_ver_get_output resp = {};
@@ -1964,7 +1990,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
     if (rc) {
         ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
                   rc);
-        return;
+        return rc;
     }
 
     cctx = rdev->chip_ctx;
@@ -1978,6 +2004,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
 
     if (!cctx->hwrm_cmd_max_timeout)
         cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+    return 0;
 }
 
 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
@@ -2039,6 +2067,72 @@ static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
     }
 }
 
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+    struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+    struct bnxt_qplib_res *res = &rdev->qplib_res;
+    int rc;
+
+    rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+    if (rc)
+        return rc;
+
+    rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+    if (rc)
+        goto free_stat_mem;
+
+    return 0;
+free_stat_mem:
+    bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+    return rc;
+}
+
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+    struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+    struct bnxt_qplib_res *res = &rdev->qplib_res;
+    int rc;
+
+    if (!rdev->rcfw.roce_mirror)
+        return 0;
+
+    rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+    if (rc)
+        return rc;
+
+    rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+    if (rc)
+        goto free_stat_mem;
+
+    return 0;
+free_stat_mem:
+    bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+    return rc;
+}
+
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+    struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+    struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+    if (!rdev->rcfw.roce_mirror)
+        return;
+
+    bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+    bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+    struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+    struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+    bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+    bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 {
     u8 type;
@@ -2049,8 +2143,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
     bnxt_re_net_unregister_async_event(rdev);
     bnxt_re_uninit_dcb_wq(rdev);
 
-    if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
-        cancel_delayed_work_sync(&rdev->worker);
+    bnxt_re_put_stats3_ctx(rdev);
 
     bnxt_re_free_gid_ctx(rdev);
     if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
@@ -2064,8 +2157,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
         if (rc)
             ibdev_warn(&rdev->ibdev,
                        "Failed to deinitialize RCFW: %#x", rc);
-    bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
-    bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+    bnxt_re_put_stats_ctx(rdev);
+    bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
     bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
     type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
     bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
@@ -2085,16 +2178,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
     }
 }
 
-/* worker thread for polling periodic events. Now used for QoS programming*/
-static void bnxt_re_worker(struct work_struct *work)
-{
-    struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
-                                            worker.work);
-
-    bnxt_re_setup_qos(rdev);
-    schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-}
-
 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 {
     struct bnxt_re_ring_attr rattr = {};
@@ -2109,8 +2192,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
         rc = bnxt_re_register_netdev(rdev);
         if (rc) {
             ibdev_err(&rdev->ibdev,
-                      "Failed to register with netedev: %#x\n", rc);
-            return -EINVAL;
+                      "Failed to register with Ethernet driver, rc %d\n",
+                      rc);
+            return rc;
         }
     }
     set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -2148,8 +2232,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
     /* Check whether VF or PF */
     bnxt_re_get_sriov_func_type(rdev);
 
-    bnxt_re_query_hwrm_intf_version(rdev);
-
     /* Establish RCFW Communication Channel to initialize the context
      * memory for the function and all child VFs
      */
@@ -2199,18 +2281,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
     if (rc)
         goto disable_rcfw;
 
+    bnxt_qplib_query_version(&rdev->rcfw);
     bnxt_re_set_resource_limits(rdev);
 
-    rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
-                              bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
-    if (rc) {
-        ibdev_err(&rdev->ibdev,
-                  "Failed to allocate QPLIB context: %#x\n", rc);
-        goto disable_rcfw;
+    if (!rdev->is_virtfn &&
+        !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+        rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+        if (rc) {
+            ibdev_err(&rdev->ibdev,
+                      "Failed to allocate hw context: %#x\n", rc);
+            goto disable_rcfw;
+        }
     }
-    rc = bnxt_re_net_stats_ctx_alloc(rdev,
-                                     rdev->qplib_ctx.stats.dma_map,
-                                     &rdev->qplib_ctx.stats.fw_id);
+
+    rc = bnxt_re_get_stats_ctx(rdev);
     if (rc) {
         ibdev_err(&rdev->ibdev,
                   "Failed to allocate stats context: %#x\n", rc);
@@ -2249,15 +2333,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
     if (rc)
         ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
 
-    rc = bnxt_re_setup_qos(rdev);
-    if (rc)
-        ibdev_info(&rdev->ibdev,
-                   "RoCE priority not yet configured\n");
-
-    INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
-    set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
-    schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-
     if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
         bnxt_re_vf_res_config(rdev);
     }
@@ -2270,11 +2345,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
     bnxt_re_init_dcb_wq(rdev);
     bnxt_re_net_register_async_event(rdev);
 
+    if (!rdev->is_virtfn)
+        bnxt_re_read_vpd_info(rdev);
+
+    rc = bnxt_re_get_stats3_ctx(rdev);
+    if (rc)
+        goto fail;
+
     return 0;
 free_sctx:
     bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
-    bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+    bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
 disable_rcfw:
     bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
 
@@ -1307,6 +1307,7 @@ static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
 
 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
+    struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
     struct bnxt_qplib_rcfw *rcfw = res->rcfw;
     struct creq_modify_qp_resp resp = {};
     struct bnxt_qplib_cmdqmsg msg = {};
@@ -1358,9 +1359,14 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
     if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
         req.flow_label = cpu_to_le32(qp->ah.flow_label);
 
-    if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
-        req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
-                                     [qp->ah.sgid_index]);
+    if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+        if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+            req.sgid_index =
+                cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+        else
+            req.sgid_index =
+                cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+    }
 
     if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
         req.hop_limit = qp->ah.hop_limit;
@@ -1464,6 +1470,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
     qp->access = sb->access;
     qp->pkey_index = le16_to_cpu(sb->pkey);
     qp->qkey = le32_to_cpu(sb->qkey);
+    qp->udp_sport = le16_to_cpu(sb->udp_src_port);
 
     temp32[0] = le32_to_cpu(sb->dgid[0]);
     temp32[1] = le32_to_cpu(sb->dgid[1]);
 
@@ -299,6 +299,7 @@ struct bnxt_qplib_qp {
     u8 smac[6];
     u16 vlan_id;
     u16 port_id;
+    u16 udp_sport;
     u8 nw_type;
     struct bnxt_qplib_ah ah;
 
@@ -344,6 +345,7 @@ struct bnxt_qplib_qp {
     u32 msn_tbl_sz;
     bool is_host_msn_tbl;
    u8 tos_dscp;
+    u32 ugid_index;
 };
 
 #define BNXT_RE_MAX_MSG_SIZE 0x80000000
 
@@ -186,7 +186,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 * wait for command completion. Maximum holding interval is 8 second.
 *
 * Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
 * 0 if command is completed by firmware.
 */
 static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -366,6 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
     wmb();
     writel(cmdq_prod, cmdq->cmdq_mbox.prod);
     writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+    print_hex_dump_bytes("req: ", DUMP_PREFIX_OFFSET, msg->req, msg->req_sz);
     spin_unlock_bh(&hwq->lock);
     /* Return the CREQ response pointer */
     return 0;
@@ -381,7 +382,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 * This function can not be called from non-sleepable context.
 *
 * Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
 * 0 if command is completed by firmware.
 */
 static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -631,6 +632,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
     int rc = 0;
 
     pdev = rcfw->pdev;
+    print_hex_dump_bytes("event: ", DUMP_PREFIX_OFFSET, qp_event, sizeof(*qp_event));
     switch (qp_event->event) {
     case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
         err_event = (struct creq_qp_error_notification *)qp_event;
@@ -903,6 +905,10 @@ skip_ctx_setup:
         flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
     if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
         flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
+    if (bnxt_qplib_roce_mirror_supported(rcfw->res->cctx)) {
+        flags |= CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED;
+        rcfw->roce_mirror = true;
+    }
     req.flags |= cpu_to_le16(flags);
     req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
     bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
 
@@ -236,6 +236,7 @@ struct bnxt_qplib_rcfw {
     atomic_t timeout_send;
     /* cached from chip cctx for quick reference in slow path */
     u16 max_timeout;
+    bool roce_mirror;
 };
 
 struct bnxt_qplib_cmdqmsg {
 
@@ -53,12 +53,6 @@
 #include "qplib_sp.h"
 #include "qplib_rcfw.h"
 
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
-                                      struct bnxt_qplib_stats *stats);
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
-                                      struct bnxt_qplib_chip_ctx *cctx,
-                                      struct bnxt_qplib_stats *stats);
-
 /* PBL */
 static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
                        bool is_umem)
@@ -352,8 +346,8 @@ fail:
 }
 
 /* Context Tables */
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
-                         struct bnxt_qplib_ctx *ctx)
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_ctx *ctx)
 {
     int i;
 
@@ -367,7 +361,6 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
     /* restore original pde level before destroy */
     ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
     bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
-    bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
 }
 
 static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
@@ -466,7 +459,7 @@ fail:
 }
 
 /*
- * Routine: bnxt_qplib_alloc_ctx
+ * Routine: bnxt_qplib_alloc_hwctx
 * Description:
 * Context tables are memories which are used by the chip fw.
 * The 6 tables defined are:
@@ -486,17 +479,13 @@ fail:
 * Returns:
 * 0 if success, else -ERRORS
 */
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
-                         struct bnxt_qplib_ctx *ctx,
-                         bool virt_fn, bool is_p5)
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_ctx *ctx)
 {
     struct bnxt_qplib_hwq_attr hwq_attr = {};
     struct bnxt_qplib_sg_info sginfo = {};
     int rc;
 
-    if (virt_fn || is_p5)
-        goto stats_alloc;
-
     /* QPC Tables */
     sginfo.pgsize = PAGE_SIZE;
     sginfo.pgshft = PAGE_SHIFT;
@@ -542,16 +531,11 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
     rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
     if (rc)
         goto fail;
-stats_alloc:
-    /* Stats */
-    rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
-    if (rc)
-        goto fail;
 
     return 0;
 
 fail:
-    bnxt_qplib_free_ctx(res, ctx);
+    bnxt_qplib_free_hwctx(res, ctx);
     return rc;
 }
 
@@ -832,8 +816,8 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
 }
 
 /* Stats */
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
-                                      struct bnxt_qplib_stats *stats)
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+                               struct bnxt_qplib_stats *stats)
 {
     if (stats->dma) {
         dma_free_coherent(&pdev->dev, stats->size,
@@ -843,9 +827,9 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
     stats->fw_id = -1;
 }
 
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
-                                      struct bnxt_qplib_chip_ctx *cctx,
-                                      struct bnxt_qplib_stats *stats)
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                               struct bnxt_qplib_chip_ctx *cctx,
+                               struct bnxt_qplib_stats *stats)
 {
     memset(stats, 0, sizeof(*stats));
     stats->fw_id = -1;
@@ -65,6 +65,7 @@ struct bnxt_qplib_drv_modes {
     bool db_push;
     bool dbr_pacing;
     u32 toggle_bits;
+    u8 roce_mirror;
 };
 
 enum bnxt_re_toggle_modes {
@@ -303,6 +304,7 @@ struct bnxt_qplib_ctx {
     struct bnxt_qplib_hwq tim_tbl;
     struct bnxt_qplib_tqm_ctx tqm_ctx;
     struct bnxt_qplib_stats stats;
+    struct bnxt_qplib_stats stats3;
     struct bnxt_qplib_vf_res vf_res;
 };
 
@@ -432,15 +434,19 @@ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
 int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
 void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
 int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
-                         struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
-                         struct bnxt_qplib_ctx *ctx,
-                         bool virt_fn, bool is_p5);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_ctx *ctx);
 int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
 void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
 
 int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                               struct bnxt_qplib_chip_ctx *cctx,
+                               struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+                               struct bnxt_qplib_stats *stats);
 
 static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
                                             struct bnxt_qplib_hwq *hwq, u32 cnt)
@@ -582,6 +588,11 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
     return cctx->modes.dbr_pacing;
 }
 
+static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
+{
+    return cctx->modes.roce_mirror;
+}
+
 static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
 {
     return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
 
@@ -66,14 +66,15 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
     return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
 }
 
-static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
-                                     char *fw_ver)
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
 {
     struct creq_query_version_resp resp = {};
     struct bnxt_qplib_cmdqmsg msg = {};
     struct cmdq_query_version req = {};
+    struct bnxt_qplib_dev_attr *attr;
     int rc;
 
+    attr = rcfw->res->dattr;
     bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                              CMDQ_BASE_OPCODE_QUERY_VERSION,
                              sizeof(req));
@@ -82,10 +83,10 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
     rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
     if (rc)
         return;
-    fw_ver[0] = resp.fw_maj;
-    fw_ver[1] = resp.fw_minor;
-    fw_ver[2] = resp.fw_bld;
-    fw_ver[3] = resp.fw_rsvd;
+    attr->fw_ver[0] = resp.fw_maj;
+    attr->fw_ver[1] = resp.fw_minor;
+    attr->fw_ver[2] = resp.fw_bld;
+    attr->fw_ver[3] = resp.fw_rsvd;
 }
 
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
@@ -179,8 +180,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
     if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
         attr->max_srq += le16_to_cpu(sb->max_srq_ext);
 
-    bnxt_qplib_query_version(rcfw, attr->fw_ver);
-
     for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
         temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
         tqm_alloc = (u8 *)&temp;
@@ -309,7 +308,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                         struct bnxt_qplib_gid *gid, const u8 *smac,
-                        u16 vlan_id, bool update, u32 *index)
+                        u16 vlan_id, bool update, u32 *index,
+                        bool is_ugid, u32 stats_ctx_id)
 {
     struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                struct bnxt_qplib_res,
@@ -374,6 +374,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
         req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
         req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
 
+        req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+                                    (u16)stats_ctx_id);
+
         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
                                 sizeof(resp), 0);
         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -397,46 +400,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
     return 0;
 }
 
-int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                           struct bnxt_qplib_gid *gid, u16 gid_idx,
-                           const u8 *smac)
-{
-    struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
-                                               struct bnxt_qplib_res,
-                                               sgid_tbl);
-    struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-    struct creq_modify_gid_resp resp = {};
-    struct bnxt_qplib_cmdqmsg msg = {};
-    struct cmdq_modify_gid req = {};
-    int rc;
-
-    bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
-                             CMDQ_BASE_OPCODE_MODIFY_GID,
-                             sizeof(req));
-
-    req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
-    req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
-    req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
-    req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
-    if (res->prio) {
-        req.vlan |= cpu_to_le16
-            (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
-             CMDQ_ADD_GID_VLAN_VLAN_EN);
-    }
-
-    /* MAC in network format */
-    req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
-    req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
-    req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
-
-    req.gid_index = cpu_to_le16(gid_idx);
-
-    bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
-                            sizeof(resp), 0);
-    rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
-    return rc;
-}
-
 /* AH */
 int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
                          bool block)
@@ -1143,3 +1106,40 @@ out:
     dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
     return rc;
 }
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+    struct creq_roce_mirror_cfg_resp resp = {};
+    struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+    struct cmdq_roce_mirror_cfg req = {};
+    struct bnxt_qplib_cmdqmsg msg = {};
+
+    bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+                             CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+                             sizeof(req));
+
+    req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+    bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+                            sizeof(resp), 0);
+    return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+    struct creq_roce_mirror_cfg_resp resp = {};
+    struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+    struct cmdq_roce_mirror_cfg req = {};
+    struct bnxt_qplib_cmdqmsg msg = {};
+
+    bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+                             CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+                             sizeof(req));
+
+    req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+    bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+                            sizeof(resp), 0);
+
+    return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
 
@@ -323,7 +323,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                         struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                         struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
-                        bool update, u32 *index);
+                        bool update, u32 *index,
+                        bool is_ugid, u32 stats_ctx_id);
 int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                            struct bnxt_qplib_gid *gid, u16 gid_idx,
                            const u8 *smac);
@@ -358,6 +359,9 @@ int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
                             u32 resp_size, void *resp_va);
 int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
                               struct bnxt_qplib_cc_param *cc_param);
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
 
 #define BNXT_VAR_MAX_WQE 4352
 #define BNXT_VAR_MAX_SLOT_ALIGN 256
 
@@ -144,7 +144,8 @@ struct cmdq_base {
     #define CMDQ_BASE_OPCODE_MODIFY_CQ            0x90UL
     #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND      0x91UL
     #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
-    #define CMDQ_BASE_OPCODE_LAST                 CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+    #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG      0x99UL
+    #define CMDQ_BASE_OPCODE_LAST                 CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
     u8     cmd_size;
     __le16 flags;
     __le16 cookie;
@@ -218,6 +219,7 @@ struct cmdq_initialize_fw {
     #define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED  0x2UL
     #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
     #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT          0x10UL
+    #define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED     0x80UL
     __le16 cookie;
     u8     resp_size;
     u8     reserved8;
@@ -788,7 +790,8 @@ struct creq_query_qp_resp_sb {
     #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
     __le16 pkey;
     __le32 qkey;
-    __le32 reserved32;
+    __le16 udp_src_port;
+    __le16 reserved16;
     __le32 dgid[4];
     __le32 flow_label;
     __le16 sgid_index;
@@ -2108,6 +2111,43 @@ struct creq_query_roce_stats_ext_resp_sb {
     __le64 dup_req;
 };
 
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+    u8     opcode;
+    #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0x99UL
+    #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST \
+        CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+    u8     cmd_size;
+    __le16 flags;
+    __le16 cookie;
+    u8     resp_size;
+    u8     reserved8;
+    __le64 resp_addr;
+    u8     mirror_flags;
+    #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+    u8     rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+    u8     type;
+    #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK 0x3fUL
+    #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT 0
+    #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+    #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST \
+        CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+    u8     status;
+    __le16 cookie;
+    __le32 reserved32;
+    u8     v;
+    #define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+    u8     event;
+    #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+    #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST \
+        CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+    u8     reserved48[6];
+};
+
 /* cmdq_query_func (size:128b/16B) */
 struct cmdq_query_func {
     u8     opcode;
 
@@ -1228,9 +1228,8 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
         if (!ctx->dev) {
             ctx->dev = c4iw_alloc(&ctx->lldi);
             if (IS_ERR(ctx->dev)) {
-                pr_err("%s: initialization failed: %ld\n",
-                       pci_name(ctx->lldi.pdev),
-                       PTR_ERR(ctx->dev));
+                pr_err("%s: initialization failed: %pe\n",
+                       pci_name(ctx->lldi.pdev), ctx->dev);
                 ctx->dev = NULL;
                 break;
             }
 
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
 /*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
 */
 
 #include "efa_com.h"
@@ -30,6 +30,7 @@ struct efa_comp_ctx {
     struct efa_admin_acq_entry *user_cqe;
     u32 comp_size;
     enum efa_cmd_status status;
+    u16 cmd_id;
     u8 cmd_opcode;
     u8 occupied;
 };
@@ -333,6 +334,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
     comp_ctx->comp_size = comp_size_in_bytes;
     comp_ctx->user_cqe = comp;
     comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+    comp_ctx->cmd_id = cmd_id;
 
     reinit_completion(&comp_ctx->wait_event);
 
@@ -557,17 +559,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
     if (comp_ctx->status == EFA_CMD_COMPLETED)
         ibdev_err_ratelimited(
             aq->efa_dev,
-            "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+            "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
             efa_com_cmd_str(comp_ctx->cmd_opcode),
             comp_ctx->cmd_opcode, comp_ctx->status,
-            comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+            comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+            aq->cq.cc);
     else
         ibdev_err_ratelimited(
             aq->efa_dev,
-            "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+            "The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
             efa_com_cmd_str(comp_ctx->cmd_opcode),
             comp_ctx->cmd_opcode, comp_ctx->status,
-            comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+            comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+            aq->cq.cc);
 
     clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
     err = -ETIME;
@@ -631,9 +635,9 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
     if (IS_ERR(comp_ctx)) {
         ibdev_err_ratelimited(
             aq->efa_dev,
-            "Failed to submit command %s (opcode %u) err %ld\n",
+            "Failed to submit command %s (opcode %u) err %pe\n",
             efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
-            cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+            cmd->aq_common_descriptor.opcode, comp_ctx);
 
         up(&aq->avail_cmds);
         atomic64_inc(&aq->stats.cmd_err);
 
@@ -1788,7 +1788,8 @@ struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
                                          access_flags);
     if (IS_ERR(umem_dmabuf)) {
         err = PTR_ERR(umem_dmabuf);
-        ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+        ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+                  umem_dmabuf);
         goto err_free;
     }
 
@@ -1832,7 +1833,8 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
     if (IS_ERR(mr->umem)) {
         err = PTR_ERR(mr->umem);
         ibdev_dbg(&dev->ibdev,
-                  "Failed to pin and map user space memory[%d]\n", err);
+                  "Failed to pin and map user space memory[%pe]\n",
+                  mr->umem);
         goto err_free;
     }
 
@@ -149,7 +149,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
             req.phy_addr[0] = mr->mem.mtt->buf_dma;
             mtt_level = ERDMA_MR_MTT_1LEVEL;
         } else {
-            req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
+            req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
             mtt_level = mr->mem.mtt->level;
         }
     } else if (mr->type != ERDMA_MR_TYPE_DMA) {
@@ -626,18 +626,27 @@ err_free_mtt:
     return ERR_PTR(-ENOMEM);
 }
 
-static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
-                                     struct erdma_mtt *mtt)
+static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
+                                  u32 npages)
 {
-    dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
-                 DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
-    vfree(mtt->sglist);
+    u32 i;
+
+    for (i = 0; i < npages; i++)
+        dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
+                       DMA_TO_DEVICE);
+}
+
+static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
+                                            struct erdma_mtt *mtt)
+{
+    erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
+    vfree(mtt->dma_addrs);
 }
 
 static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
                                       struct erdma_mtt *mtt)
 {
-    erdma_destroy_mtt_buf_sg(dev, mtt);
+    erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
     vfree(mtt->buf);
     kfree(mtt);
 }
@@ -645,50 +654,69 @@ static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
 static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
                                   struct erdma_mtt *low_mtt)
 {
-    struct scatterlist *sg;
-    u32 idx = 0, i;
+    dma_addr_t *pg_addr = mtt->buf;
+    u32 i;
 
-    for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
-        mtt->buf[idx++] = sg_dma_address(sg);
+    for (i = 0; i < low_mtt->npages; i++)
+        pg_addr[i] = low_mtt->dma_addrs[i];
 }
 
-static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
+static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
+                                void *buf, u64 len)
 {
-    struct scatterlist *sglist;
-    void *buf = mtt->buf;
-    u32 npages, i, nsg;
+    dma_addr_t *pg_dma;
     struct page *pg;
+    u32 npages, i;
+    void *addr;
 
-    /* Failed if buf is not page aligned */
-    if ((uintptr_t)buf & ~PAGE_MASK)
-        return -EINVAL;
+    npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
+             PAGE_SHIFT;
+    pg_dma = vcalloc(npages, sizeof(*pg_dma));
+    if (!pg_dma)
+        return 0;
 
-    npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
-    sglist = vzalloc(npages * sizeof(*sglist));
-    if (!sglist)
-        return -ENOMEM;
-
-    sg_init_table(sglist, npages);
+    addr = buf;
     for (i = 0; i < npages; i++) {
-        pg = vmalloc_to_page(buf);
+        pg = vmalloc_to_page(addr);
         if (!pg)
            goto err;
-        sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
-        buf += PAGE_SIZE;
-    }
 
-    nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
-    if (!nsg)
-        goto err;
+        pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
+                                 DMA_TO_DEVICE);
+        if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
+            goto err;
+
+        addr += PAGE_SIZE;
+    }
 
-    mtt->sglist = sglist;
-    mtt->nsg = nsg;
+    *dma_addrs = pg_dma;
 
-    return 0;
+    return npages;
 err:
-    vfree(sglist);
+    erdma_unmap_page_list(dev, pg_dma, i);
+    vfree(pg_dma);
 
-    return -ENOMEM;
+    return 0;
+}
+
+static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
+                                          struct erdma_mtt *mtt)
+{
+    dma_addr_t *addrs;
+    u32 npages;
+
+    /* Failed if buf is not page aligned */
+    if ((uintptr_t)mtt->buf & ~PAGE_MASK)
+        return -EINVAL;
+
+    npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
+    if (!npages)
+        return -ENOMEM;
+
+    mtt->dma_addrs = addrs;
+    mtt->npages = npages;
+
+    return 0;
 }
 
 static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
@@ -707,12 +735,12 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
     if (!mtt->buf)
         goto err_free_mtt;
 
-    ret = erdma_create_mtt_buf_sg(dev, mtt);
+    ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
     if (ret)
        goto err_free_mtt_buf;
 
-    ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
-              mtt->size, mtt->nsg);
+    ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
+              mtt->size, mtt->npages);
 
     return mtt;
 
@@ -746,8 +774,8 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
         level = 1;
 
         /* convergence the mtt table. */
-        while (mtt->nsg != 1 && level <= 3) {
-            tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
+        while (mtt->npages != 1 && level <= 3) {
+            tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
             if (IS_ERR(tmp_mtt)) {
                 ret = PTR_ERR(tmp_mtt);
                 goto err_free_mtt;
@@ -765,7 +793,7 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
 
     mtt->level = level;
     ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
-              mtt->level, mtt->sglist[0].dma_address);
+              mtt->level, mtt->dma_addrs[0]);
 
     return mtt;
 err_free_mtt:
||||
@@ -99,8 +99,8 @@ struct erdma_mtt {
 	union {
 		dma_addr_t buf_dma;
 		struct {
-			struct scatterlist *sglist;
-			u32 nsg;
+			dma_addr_t *dma_addrs;
+			u32 npages;
 			u32 level;
 		};
 	};
 
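The erdma hunks above replace a scatterlist with a flat array of per-page DMA addresses for the vmalloc'd MTT buffer. For context, a minimal standalone sketch of the same technique follows; the function name demo_map_vmalloc_buf and its exact shape are illustrative assumptions, not code from this series, and the buffer is assumed page aligned as in the driver.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int demo_map_vmalloc_buf(struct device *dma_dev, void *buf,
				size_t len, dma_addr_t **out)
{
	u32 npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	dma_addr_t *dma;
	u32 i;

	dma = vcalloc(npages, sizeof(*dma));
	if (!dma)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		/* resolve each vmalloc page, then map it individually */
		struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);

		if (!pg)
			goto err;
		dma[i] = dma_map_page(dma_dev, pg, 0, PAGE_SIZE,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma[i]))
			goto err;
	}

	*out = dma;
	return 0;
err:
	/* unmap only the pages mapped so far */
	while (i--)
		dma_unmap_page(dma_dev, dma[i], PAGE_SIZE, DMA_TO_DEVICE);
	vfree(dma);
	return -ENOMEM;
}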
@@ -64,9 +64,9 @@ int hfi1_cdev_init(int minor, const char *name,
 
 	if (IS_ERR(device)) {
 		ret = PTR_ERR(device);
+		pr_err("Could not create device for minor %d, %s (err %pe)\n",
+		       minor, name, device);
 		device = NULL;
-		pr_err("Could not create device for minor %d, %s (err %d)\n",
-		       minor, name, -ret);
 		cdev_del(cdev);
 	}
 done:
 
@@ -990,7 +990,7 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
 	}
 
 	/* Clean up old mappings */
-	for_each_cpu(cpu, cpu_online_mask) {
+	for_each_online_cpu(cpu) {
 		struct sdma_rht_node *rht_node;
 
 		/* Don't cleanup sdes that are set in the new mask */
 
@@ -498,8 +498,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 			       ntids, sizeof(*req->tids));
 		if (IS_ERR(tmp)) {
 			ret = PTR_ERR(tmp);
-			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
-				 ntids, ret);
+			SDMA_DBG(req, "Failed to copy %d TIDs (%pe)", ntids,
+				 tmp);
 			goto free_req;
 		}
 		req->tids = tmp;
 
@@ -594,8 +594,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
 					buf_attr->user_access);
 		if (IS_ERR(mtr->umem)) {
-			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
-				  PTR_ERR(mtr->umem));
+			ibdev_err(ibdev, "failed to get umem, ret = %pe.\n",
+				  mtr->umem);
 			return -ENOMEM;
 		}
 	} else {
@@ -605,8 +605,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 					       !mtr_has_mtt(buf_attr) ?
 					       HNS_ROCE_BUF_DIRECT : 0);
 		if (IS_ERR(mtr->kmem)) {
-			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
-				  PTR_ERR(mtr->kmem));
+			ibdev_err(ibdev, "failed to alloc kmem, ret = %pe.\n",
+				  mtr->kmem);
 			return PTR_ERR(mtr->kmem);
 		}
 	}
 
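The hfi1 and hns hunks above belong to the series-wide switch to the %pe printk specifier, which prints an ERR_PTR() as its symbolic errno name and removes the PTR_ERR() juggling in format arguments. A minimal sketch, with a hypothetical function name:

#include <linux/err.h>
#include <linux/printk.h>

static void pe_demo(void)
{
	void *p = ERR_PTR(-ENOMEM);

	if (IS_ERR(p))
		pr_err("allocation failed: %pe\n", p);	/* prints "-ENOMEM" */
}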
drivers/infiniband/hw/ionic/Kconfig (new file, 15 lines)
@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2018-2025, Advanced Micro Devices, Inc.

config INFINIBAND_IONIC
	tristate "AMD Pensando DSC RDMA/RoCE Support"
	depends on NETDEVICES && ETHERNET && PCI && INET && IONIC
	help
	  This enables RDMA/RoCE support for the AMD Pensando family of
	  Distributed Services Cards (DSCs).

	  To learn more, visit our website at
	  <https://www.amd.com/en/products/accelerators/pensando.html>.

	  To compile this driver as a module, choose M here. The module
	  will be called ionic_rdma.
drivers/infiniband/hw/ionic/Makefile (new file, 9 lines)
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0

ccflags-y := -I $(srctree)/drivers/net/ethernet/pensando/ionic

obj-$(CONFIG_INFINIBAND_IONIC) += ionic_rdma.o

ionic_rdma-y := \
	ionic_ibdev.o ionic_lif_cfg.o ionic_queue.o ionic_pgtbl.o ionic_admin.o \
	ionic_controlpath.o ionic_datapath.o ionic_hw_stats.o
drivers/infiniband/hw/ionic/ionic_admin.c (new file, 1229 lines; diff suppressed because it is too large)
drivers/infiniband/hw/ionic/ionic_controlpath.c (new file, 2679 lines; diff suppressed because it is too large)
drivers/infiniband/hw/ionic/ionic_datapath.c (new file, 1399 lines; diff suppressed because it is too large)
drivers/infiniband/hw/ionic/ionic_fw.h (new file, 1029 lines; diff suppressed because it is too large)
drivers/infiniband/hw/ionic/ionic_hw_stats.c (new file, 484 lines)
@@ -0,0 +1,484 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/dma-mapping.h>

#include "ionic_fw.h"
#include "ionic_ibdev.h"

static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
				   int hw_stats_count)
{
	int hw_stat_i;

	for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
		struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];

		stat->type_off = be32_to_cpu(stat->be_type_off);
		stat->name[sizeof(stat->name) - 1] = 0;
		if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
			break;
	}

	return hw_stat_i;
}

static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
				  struct ionic_v1_stat *hw_stats,
				  int hw_stats_count)
{
	int hw_stat_i;

	for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
		struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];

		hw_stats_hdrs[hw_stat_i].name = stat->name;
	}
}

static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
			     void *vals_buf, size_t vals_len)
{
	unsigned int off = ionic_v1_stat_off(stat);
	int type = ionic_v1_stat_type(stat);

#define __ionic_v1_stat_validate(__type)		\
	((off + sizeof(__type) <= vals_len) &&		\
	 (IS_ALIGNED(off, sizeof(__type))))

	switch (type) {
	case IONIC_V1_STAT_TYPE_8:
		if (__ionic_v1_stat_validate(u8))
			return *(u8 *)(vals_buf + off);
		break;
	case IONIC_V1_STAT_TYPE_LE16:
		if (__ionic_v1_stat_validate(__le16))
			return le16_to_cpu(*(__le16 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_LE32:
		if (__ionic_v1_stat_validate(__le32))
			return le32_to_cpu(*(__le32 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_LE64:
		if (__ionic_v1_stat_validate(__le64))
			return le64_to_cpu(*(__le64 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_BE16:
		if (__ionic_v1_stat_validate(__be16))
			return be16_to_cpu(*(__be16 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_BE32:
		if (__ionic_v1_stat_validate(__be32))
			return be32_to_cpu(*(__be32 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_BE64:
		if (__ionic_v1_stat_validate(__be64))
			return be64_to_cpu(*(__be64 *)(vals_buf + off));
		break;
	}

	return ~0ull;
#undef __ionic_v1_stat_validate
}

static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
			      dma_addr_t dma, size_t len, int qid, int op)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = op,
			.len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
			.cmd.stats = {
				.dma_addr = cpu_to_le64(dma),
				.length = cpu_to_le32(len),
				.id_ver = cpu_to_le32(qid),
			},
		}
	};

	if (dev->lif_cfg.admin_opcodes <= op)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
}

static int ionic_init_hw_stats(struct ionic_ibdev *dev)
{
	dma_addr_t hw_stats_dma;
	int rc, hw_stats_count;

	if (dev->hw_stats_hdrs)
		return 0;

	dev->hw_stats_count = 0;

	/* buffer for current values from the device */
	dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats_buf) {
		rc = -ENOMEM;
		goto err_buf;
	}

	/* buffer for names, sizes, offsets of values */
	dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats) {
		rc = -ENOMEM;
		goto err_hw_stats;
	}

	/* request the names, sizes, offsets */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count =
		ionic_v1_stat_normalize(dev->hw_stats,
					PAGE_SIZE / sizeof(*dev->hw_stats));
	if (!hw_stats_count) {
		rc = -ENODATA;
		goto err_dma;
	}

	dev->hw_stats_count = hw_stats_count;

	/* alloc and init array of names, for alloc_hw_stats */
	dev->hw_stats_hdrs = kcalloc(hw_stats_count,
				     sizeof(*dev->hw_stats_hdrs),
				     GFP_KERNEL);
	if (!dev->hw_stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
			      hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	kfree(dev->hw_stats);
err_hw_stats:
	kfree(dev->hw_stats_buf);
err_buf:
	dev->hw_stats_count = 0;
	dev->hw_stats = NULL;
	dev->hw_stats_buf = NULL;
	dev->hw_stats_hdrs = NULL;
	return rc;
}

static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
						  u32 port)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);

	if (port != 1)
		return NULL;

	return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
					  dev->hw_stats_count,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int ionic_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 port, int index)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	dma_addr_t hw_stats_dma;
	int rc, hw_stat_i;

	if (port != 1)
		return -EINVAL;

	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
				0, IONIC_V1_ADMIN_STATS_VALS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
			 PAGE_SIZE, DMA_FROM_DEVICE);

	for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
		hw_stats->value[hw_stat_i] =
			ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
					  dev->hw_stats_buf, PAGE_SIZE);

	return hw_stat_i;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
			 PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	return rc;
}

static struct rdma_hw_stats *
ionic_counter_alloc_stats(struct rdma_counter *counter)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_counter *cntr;
	int err;

	cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
	if (!cntr)
		return NULL;

	/* buffer for current values from the device */
	cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cntr->vals)
		goto err_vals;

	err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id, cntr,
		       XA_LIMIT(0, IONIC_MAX_QPID), GFP_KERNEL);
	if (err)
		goto err_xa;

	INIT_LIST_HEAD(&cntr->qp_list);

	return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
					  dev->counter_stats->queue_stats_count,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
err_xa:
	kfree(cntr->vals);
err_vals:
	kfree(cntr);

	return NULL;
}

static int ionic_counter_dealloc(struct rdma_counter *counter)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_counter *cntr;

	cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
	if (!cntr)
		return -EINVAL;

	kfree(cntr->vals);
	kfree(cntr);

	return 0;
}

static int ionic_counter_bind_qp(struct rdma_counter *counter,
				 struct ib_qp *ibqp,
				 u32 port)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	struct ionic_counter *cntr;

	cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
	if (!cntr)
		return -EINVAL;

	list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
	ibqp->counter = counter;

	return 0;
}

static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
{
	struct ionic_qp *qp = to_ionic_qp(ibqp);

	if (ibqp->counter) {
		list_del(&qp->qp_list_counter);
		ibqp->counter = NULL;
	}

	return 0;
}

static int ionic_get_qp_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 counter_id)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	struct ionic_counter_stats *cs;
	struct ionic_counter *cntr;
	dma_addr_t hw_stats_dma;
	struct ionic_qp *qp;
	int rc, stat_i = 0;

	cs = dev->counter_stats;
	cntr = xa_load(&cs->xa_counters, counter_id);
	if (!cntr)
		return -EINVAL;

	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		return rc;

	memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);

	list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
		rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
					qp->qpid,
					IONIC_V1_ADMIN_QP_STATS_VALS);
		if (rc)
			goto err_cmd;

		for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
			hw_stats->value[stat_i] +=
				ionic_v1_stat_val(&cs->hdr[stat_i],
						  cntr->vals,
						  PAGE_SIZE);
	}

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	return stat_i;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	return rc;
}

static int ionic_counter_update_stats(struct rdma_counter *counter)
{
	return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
}

static int ionic_alloc_counters(struct ionic_ibdev *dev)
{
	struct ionic_counter_stats *cs = dev->counter_stats;
	int rc, hw_stats_count;
	dma_addr_t hdr_dma;

	/* buffer for names, sizes, offsets of values */
	cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cs->hdr)
		return -ENOMEM;

	hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
				 PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_QP_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
						 PAGE_SIZE / sizeof(*cs->hdr));
	if (!hw_stats_count) {
		rc = -ENODATA;
		goto err_dma;
	}

	cs->queue_stats_count = hw_stats_count;

	/* alloc and init array of names */
	cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
				 GFP_KERNEL);
	if (!cs->stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	kfree(cs->hdr);

	return rc;
}

static const struct ib_device_ops ionic_hw_stats_ops = {
	.driver_id = RDMA_DRIVER_IONIC,
	.alloc_hw_port_stats = ionic_alloc_hw_stats,
	.get_hw_stats = ionic_get_hw_stats,
};

static const struct ib_device_ops ionic_counter_stats_ops = {
	.counter_alloc_stats = ionic_counter_alloc_stats,
	.counter_dealloc = ionic_counter_dealloc,
	.counter_bind_qp = ionic_counter_bind_qp,
	.counter_unbind_qp = ionic_counter_unbind_qp,
	.counter_update_stats = ionic_counter_update_stats,
};

void ionic_stats_init(struct ionic_ibdev *dev)
{
	u16 stats_type = dev->lif_cfg.stats_type;
	int rc;

	if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
		rc = ionic_init_hw_stats(dev);
		if (rc)
			ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
		else
			ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
	}

	if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
		dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
					     GFP_KERNEL);
		if (!dev->counter_stats)
			return;

		rc = ionic_alloc_counters(dev);
		if (rc) {
			ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
			kfree(dev->counter_stats);
			dev->counter_stats = NULL;
			return;
		}

		xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);

		ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
	}
}

void ionic_stats_cleanup(struct ionic_ibdev *dev)
{
	if (dev->counter_stats) {
		xa_destroy(&dev->counter_stats->xa_counters);
		kfree(dev->counter_stats->hdr);
		kfree(dev->counter_stats->stats_hdrs);
		kfree(dev->counter_stats);
		dev->counter_stats = NULL;
	}

	kfree(dev->hw_stats);
	kfree(dev->hw_stats_buf);
	kfree(dev->hw_stats_hdrs);
}
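As a condensed view of the pattern ionic_v1_stat_val() applies above: a device-supplied (type, offset) descriptor is dereferenced only after a bounds and natural-alignment check against the DMA'd values buffer, and anything out of range yields the ~0 poison value. The helper below is an illustrative sketch for the LE64 case only, not driver code:

#include <asm/byteorder.h>
#include <linux/align.h>
#include <linux/types.h>

static u64 demo_read_le64_stat(const void *vals_buf, size_t vals_len,
			       unsigned int off)
{
	/* reject reads past the buffer or at unaligned offsets */
	if (off + sizeof(__le64) > vals_len || !IS_ALIGNED(off, sizeof(__le64)))
		return ~0ull;

	return le64_to_cpu(*(const __le64 *)(vals_buf + off));
}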
drivers/infiniband/hw/ionic/ionic_ibdev.c (new file, 440 lines)
@@ -0,0 +1,440 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include "ionic_ibdev.h"

#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver"
#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA"

MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>");
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("NET_IONIC");

static int ionic_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *attr,
			      struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	struct net_device *ndev;

	ndev = ib_device_get_netdev(ibdev, 1);
	addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev);
	dev_put(ndev);
	attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
	attr->page_size_cap = dev->lif_cfg.page_size_supported;

	attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor;
	attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device;

	attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif);
	attr->fw_ver = 0;
	attr->max_qp = dev->lif_cfg.qp_count;
	attr->max_qp_wr = IONIC_MAX_DEPTH;
	attr->device_cap_flags =
		IB_DEVICE_MEM_WINDOW |
		IB_DEVICE_MEM_MGT_EXTENSIONS |
		IB_DEVICE_MEM_WINDOW_TYPE_2B |
		0;
	attr->max_send_sge =
		min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
		    IONIC_SPEC_HIGH);
	attr->max_recv_sge =
		min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
		    IONIC_SPEC_HIGH);
	attr->max_sge_rd = attr->max_send_sge;
	attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
	attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE;
	attr->max_mr = dev->lif_cfg.nmrs_per_lif;
	attr->max_pd = IONIC_MAX_PD;
	attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM;
	attr->max_ee_rd_atom = 0;
	attr->max_res_rd_atom = IONIC_MAX_RD_ATOM;
	attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM;
	attr->max_ee_init_rd_atom = 0;
	attr->atomic_cap = IB_ATOMIC_GLOB;
	attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	attr->max_mw = dev->lif_cfg.nmrs_per_lif;
	attr->max_mcast_grp = 0;
	attr->max_mcast_qp_attach = 0;
	attr->max_ah = dev->lif_cfg.nahs_per_lif;
	attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2;
	attr->max_pkeys = IONIC_PKEY_TBL_LEN;

	return 0;
}

static int ionic_query_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *attr)
{
	struct net_device *ndev;

	if (port != 1)
		return -EINVAL;

	ndev = ib_device_get_netdev(ibdev, port);

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else if (netif_running(ndev)) {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	attr->max_mtu = iboe_get_mtu(ndev->max_mtu);
	attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu));
	attr->gid_tbl_len = IONIC_GID_TBL_LEN;
	attr->ip_gids = true;
	attr->port_cap_flags = 0;
	attr->max_msg_sz = 0x80000000;
	attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
	attr->max_vl_num = 1;
	attr->subnet_prefix = 0xfe80000000000000ull;

	dev_put(ndev);

	return ib_get_eth_speed(ibdev, port,
				&attr->active_speed,
				&attr->active_width);
}

static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev,
						 u32 port)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey)
{
	if (port != 1)
		return -EINVAL;

	if (index != 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}

static int ionic_modify_device(struct ib_device *ibdev, int mask,
			       struct ib_device_modify *attr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(dev->ibdev.node_desc, attr->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);

	return 0;
}

static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port,
				    struct ib_port_immutable *attr)
{
	if (port != 1)
		return -EINVAL;

	attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
	attr->gid_tbl_len = IONIC_GID_TBL_LEN;
	attr->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);

	ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX);
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct ionic_ibdev *dev =
		rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);

	return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif));
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct ionic_ibdev *dev =
		rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);

	return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *ionic_rdma_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group ionic_rdma_attr_group = {
	.attrs = ionic_rdma_attributes,
};

static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	/*
	 * Dummy define disassociate_ucontext so that it does not
	 * wait for user context before cleaning up hw resources.
	 */
}

static const struct ib_device_ops ionic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_IONIC,
	.uverbs_abi_ver = IONIC_ABI_VERSION,

	.alloc_ucontext = ionic_alloc_ucontext,
	.dealloc_ucontext = ionic_dealloc_ucontext,
	.mmap = ionic_mmap,
	.mmap_free = ionic_mmap_free,
	.alloc_pd = ionic_alloc_pd,
	.dealloc_pd = ionic_dealloc_pd,
	.create_ah = ionic_create_ah,
	.query_ah = ionic_query_ah,
	.destroy_ah = ionic_destroy_ah,
	.create_user_ah = ionic_create_ah,
	.get_dma_mr = ionic_get_dma_mr,
	.reg_user_mr = ionic_reg_user_mr,
	.reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
	.dereg_mr = ionic_dereg_mr,
	.alloc_mr = ionic_alloc_mr,
	.map_mr_sg = ionic_map_mr_sg,
	.alloc_mw = ionic_alloc_mw,
	.dealloc_mw = ionic_dealloc_mw,
	.create_cq = ionic_create_cq,
	.destroy_cq = ionic_destroy_cq,
	.create_qp = ionic_create_qp,
	.modify_qp = ionic_modify_qp,
	.query_qp = ionic_query_qp,
	.destroy_qp = ionic_destroy_qp,

	.post_send = ionic_post_send,
	.post_recv = ionic_post_recv,
	.poll_cq = ionic_poll_cq,
	.req_notify_cq = ionic_req_notify_cq,

	.query_device = ionic_query_device,
	.query_port = ionic_query_port,
	.get_link_layer = ionic_get_link_layer,
	.query_pkey = ionic_query_pkey,
	.modify_device = ionic_modify_device,
	.get_port_immutable = ionic_get_port_immutable,
	.get_dev_fw_str = ionic_get_dev_fw_str,
	.device_group = &ionic_rdma_attr_group,
	.disassociate_ucontext = ionic_disassociate_ucontext,

	INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
	INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
};

static void ionic_init_resids(struct ionic_ibdev *dev)
{
	ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
	dev->half_cqid_udma_shift =
		order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
	ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
	ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
	ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
	/* skip reserved lkey */
	dev->next_mrkey = 1;
	ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
	/* skip reserved SMI and GSI qpids */
	dev->half_qpid_udma_shift =
		order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
	ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
}

static void ionic_destroy_resids(struct ionic_ibdev *dev)
{
	ionic_resid_destroy(&dev->inuse_cqid);
	ionic_resid_destroy(&dev->inuse_pdid);
	ionic_resid_destroy(&dev->inuse_ahid);
	ionic_resid_destroy(&dev->inuse_mrid);
	ionic_resid_destroy(&dev->inuse_qpid);
	ionic_resid_destroy(&dev->inuse_dbid);
}

static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
{
	ionic_kill_rdma_admin(dev, false);
	ib_unregister_device(&dev->ibdev);
	ionic_stats_cleanup(dev);
	ionic_destroy_rdma_admin(dev);
	ionic_destroy_resids(dev);
	WARN_ON(!xa_empty(&dev->qp_tbl));
	xa_destroy(&dev->qp_tbl);
	WARN_ON(!xa_empty(&dev->cq_tbl));
	xa_destroy(&dev->cq_tbl);
	ib_dealloc_device(&dev->ibdev);
}

static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
{
	struct ib_device *ibdev;
	struct ionic_ibdev *dev;
	struct net_device *ndev;
	int rc;

	dev = ib_alloc_device(ionic_ibdev, ibdev);
	if (!dev)
		return ERR_PTR(-EINVAL);

	ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);

	xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
	xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);

	ionic_init_resids(dev);

	rc = ionic_rdma_reset_devcmd(dev);
	if (rc)
		goto err_reset;

	rc = ionic_create_rdma_admin(dev);
	if (rc)
		goto err_admin;

	ibdev = &dev->ibdev;
	ibdev->dev.parent = dev->lif_cfg.hwdev;

	strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX);
	strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX);

	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = 1;

	/* the first two eq are reserved for async events */
	ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2;

	ndev = ionic_lif_netdev(ionic_adev->lif);
	addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev);
	rc = ib_device_set_netdev(ibdev, ndev, 1);
	/* ionic_lif_netdev() returns ndev with refcount held */
	dev_put(ndev);
	if (rc)
		goto err_admin;

	ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);

	ionic_stats_init(dev);

	rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
	if (rc)
		goto err_register;

	return dev;

err_register:
	ionic_stats_cleanup(dev);
err_admin:
	ionic_kill_rdma_admin(dev, false);
	ionic_destroy_rdma_admin(dev);
err_reset:
	ionic_destroy_resids(dev);
	xa_destroy(&dev->qp_tbl);
	xa_destroy(&dev->cq_tbl);
	ib_dealloc_device(&dev->ibdev);

	return ERR_PTR(rc);
}

static int ionic_aux_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct ionic_aux_dev *ionic_adev;
	struct ionic_ibdev *dev;

	ionic_adev = container_of(adev, struct ionic_aux_dev, adev);
	dev = ionic_create_ibdev(ionic_adev);
	if (IS_ERR(dev))
		return dev_err_probe(&adev->dev, PTR_ERR(dev),
				     "Failed to register ibdev\n");

	auxiliary_set_drvdata(adev, dev);
	ibdev_dbg(&dev->ibdev, "registered\n");

	return 0;
}

static void ionic_aux_remove(struct auxiliary_device *adev)
{
	struct ionic_ibdev *dev = auxiliary_get_drvdata(adev);

	dev_dbg(&adev->dev, "unregister ibdev\n");
	ionic_destroy_ibdev(dev);
	dev_dbg(&adev->dev, "unregistered\n");
}

static const struct auxiliary_device_id ionic_aux_id_table[] = {
	{ .name = "ionic.rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table);

static struct auxiliary_driver ionic_aux_r_driver = {
	.name = "rdma",
	.probe = ionic_aux_probe,
	.remove = ionic_aux_remove,
	.id_table = ionic_aux_id_table,
};

static int __init ionic_mod_init(void)
{
	int rc;

	ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
	if (!ionic_evt_workq)
		return -ENOMEM;

	rc = auxiliary_driver_register(&ionic_aux_r_driver);
	if (rc)
		goto err_aux;

	return 0;

err_aux:
	destroy_workqueue(ionic_evt_workq);

	return rc;
}

static void __exit ionic_mod_exit(void)
{
	auxiliary_driver_unregister(&ionic_aux_r_driver);
	destroy_workqueue(ionic_evt_workq);
}

module_init(ionic_mod_init);
module_exit(ionic_mod_exit);
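The id table above matches auxiliary devices named "ionic.rdma", that is, a device called "rdma" published by the ionic ethernet module. A simplified sketch of that producer side follows; the real registration lives in the ethernet driver and is not part of this diff, so the helper shown is an assumption built only from the standard auxiliary bus API:

#include <linux/auxiliary_bus.h>

static int demo_adev_register(struct ionic_aux_dev *ionic_adev,
			      struct device *parent, int id)
{
	int err;

	ionic_adev->adev.name = "rdma";	/* matched as "ionic.rdma" */
	ionic_adev->adev.id = id;
	ionic_adev->adev.dev.parent = parent;

	err = auxiliary_device_init(&ionic_adev->adev);
	if (err)
		return err;

	err = auxiliary_device_add(&ionic_adev->adev);
	if (err)
		auxiliary_device_uninit(&ionic_adev->adev);

	return err;
}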
drivers/infiniband/hw/ionic/ionic_ibdev.h (new file, 517 lines)
@@ -0,0 +1,517 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#ifndef _IONIC_IBDEV_H_
#define _IONIC_IBDEV_H_

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/uverbs_ioctl.h>

#include <rdma/ionic-abi.h>
#include <ionic_api.h>
#include <ionic_regs.h>

#include "ionic_fw.h"
#include "ionic_queue.h"
#include "ionic_res.h"

#include "ionic_lif_cfg.h"

/* Config knobs */
#define IONIC_EQ_DEPTH 511
#define IONIC_EQ_COUNT 32
#define IONIC_AQ_DEPTH 63
#define IONIC_AQ_COUNT 4
#define IONIC_EQ_ISR_BUDGET 10
#define IONIC_EQ_WORK_BUDGET 1000
#define IONIC_MAX_RD_ATOM 16
#define IONIC_PKEY_TBL_LEN 1
#define IONIC_GID_TBL_LEN 256

#define IONIC_MAX_QPID 0xffffff
#define IONIC_SPEC_HIGH 8
#define IONIC_MAX_PD 1024
#define IONIC_SQCMB_ORDER 5
#define IONIC_RQCMB_ORDER 0

#define IONIC_META_LAST ((void *)1ul)
#define IONIC_META_POSTED ((void *)2ul)

#define IONIC_CQ_GRACE 100

#define IONIC_ROCE_UDP_SPORT 28272
#define IONIC_DMA_LKEY 0
#define IONIC_DMA_RKEY IONIC_DMA_LKEY

#define IONIC_CMB_SUPPORTED \
	(IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \
	 IONIC_CMB_WC | IONIC_CMB_UC)

/* resource is not reserved on the device, indicated in tbl_order */
#define IONIC_RES_INVALID -1

struct ionic_aq;
struct ionic_cq;
struct ionic_eq;
struct ionic_vcq;

enum ionic_admin_state {
	IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */
	IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */
	IONIC_ADMIN_KILLED, /* not submitting, locally completed */
};

enum ionic_admin_flags {
	IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */
	IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */
	IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
};

enum ionic_mmap_flag {
	IONIC_MMAP_WC = BIT(0),
};

struct ionic_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	unsigned long size;
	unsigned long pfn;
	u8 mmap_flags;
};

struct ionic_ibdev {
	struct ib_device ibdev;

	struct ionic_lif_cfg lif_cfg;

	struct xarray qp_tbl;
	struct xarray cq_tbl;

	struct ionic_resid_bits inuse_dbid;
	struct ionic_resid_bits inuse_pdid;
	struct ionic_resid_bits inuse_ahid;
	struct ionic_resid_bits inuse_mrid;
	struct ionic_resid_bits inuse_qpid;
	struct ionic_resid_bits inuse_cqid;

	u8 half_cqid_udma_shift;
	u8 half_qpid_udma_shift;
	u8 next_qpid_udma_idx;
	u8 next_mrkey;

	struct work_struct reset_work;
	bool reset_posted;
	u32 reset_cnt;

	struct delayed_work admin_dwork;
	struct ionic_aq **aq_vec;
	atomic_t admin_state;

	struct ionic_eq **eq_vec;

	struct ionic_v1_stat *hw_stats;
	void *hw_stats_buf;
	struct rdma_stat_desc *hw_stats_hdrs;
	struct ionic_counter_stats *counter_stats;
	int hw_stats_count;
};

struct ionic_eq {
	struct ionic_ibdev *dev;

	u32 eqid;
	u32 intr;

	struct ionic_queue q;

	int armed;
	bool enable;

	struct work_struct work;

	int irq;
	char name[32];
};

struct ionic_admin_wr {
	struct completion work;
	struct list_head aq_ent;
	struct ionic_v1_admin_wqe wqe;
	struct ionic_v1_cqe cqe;
	struct ionic_aq *aq;
	int status;
};

struct ionic_admin_wr_q {
	struct ionic_admin_wr *wr;
	int wqe_strides;
};

struct ionic_aq {
	struct ionic_ibdev *dev;
	struct ionic_vcq *vcq;

	struct work_struct work;

	atomic_t admin_state;
	unsigned long stamp;
	bool armed;

	u32 aqid;
	u32 cqid;

	spinlock_t lock; /* for posting */
	struct ionic_queue q;
	struct ionic_admin_wr_q *q_wr;
	struct list_head wr_prod;
	struct list_head wr_post;
};

struct ionic_ctx {
	struct ib_ucontext ibctx;
	u32 dbid;
	struct rdma_user_mmap_entry *mmap_dbell;
};

struct ionic_tbl_buf {
	u32 tbl_limit;
	u32 tbl_pages;
	size_t tbl_size;
	__le64 *tbl_buf;
	dma_addr_t tbl_dma;
	u8 page_size_log2;
};

struct ionic_pd {
	struct ib_pd ibpd;

	u32 pdid;
	u32 flags;
};

struct ionic_cq {
	struct ionic_vcq *vcq;

	u32 cqid;
	u32 eqid;

	spinlock_t lock; /* for polling */
	struct list_head poll_sq;
	bool flush;
	struct list_head flush_sq;
	struct list_head flush_rq;
	struct list_head ibkill_flush_ent;

	struct ionic_queue q;
	bool color;
	int credit;
	u16 arm_any_prod;
	u16 arm_sol_prod;

	struct kref cq_kref;
	struct completion cq_rel_comp;

	/* infrequently accessed, keep at end */
	struct ib_umem *umem;
};

struct ionic_vcq {
	struct ib_cq ibcq;
	struct ionic_cq cq[2];
	u8 udma_mask;
	u8 poll_idx;
};

struct ionic_sq_meta {
	u64 wrid;
	u32 len;
	u16 seq;
	u8 ibop;
	u8 ibsts;
	u8 remote:1;
	u8 signal:1;
	u8 local_comp:1;
};

struct ionic_rq_meta {
	struct ionic_rq_meta *next;
	u64 wrid;
};

struct ionic_qp {
	struct ib_qp ibqp;
	enum ib_qp_state state;

	u32 qpid;
	u32 ahid;
	u32 sq_cqid;
	u32 rq_cqid;
	u8 udma_idx;
	u8 has_ah:1;
	u8 has_sq:1;
	u8 has_rq:1;
	u8 sig_all:1;

	struct list_head qp_list_counter;

	struct list_head cq_poll_sq;
	struct list_head cq_flush_sq;
	struct list_head cq_flush_rq;
	struct list_head ibkill_flush_ent;

	spinlock_t sq_lock; /* for posting and polling */
	struct ionic_queue sq;
	struct ionic_sq_meta *sq_meta;
	u16 *sq_msn_idx;
	int sq_spec;
	u16 sq_old_prod;
	u16 sq_msn_prod;
	u16 sq_msn_cons;
	u8 sq_cmb;
	bool sq_flush;
	bool sq_flush_rcvd;

	spinlock_t rq_lock; /* for posting and polling */
	struct ionic_queue rq;
	struct ionic_rq_meta *rq_meta;
	struct ionic_rq_meta *rq_meta_head;
	int rq_spec;
	u16 rq_old_prod;
	u8 rq_cmb;
	bool rq_flush;

	struct kref qp_kref;
	struct completion qp_rel_comp;

	/* infrequently accessed, keep at end */
	int sgid_index;
	int sq_cmb_order;
	u32 sq_cmb_pgid;
	phys_addr_t sq_cmb_addr;
	struct rdma_user_mmap_entry *mmap_sq_cmb;

	struct ib_umem *sq_umem;

	int rq_cmb_order;
	u32 rq_cmb_pgid;
	phys_addr_t rq_cmb_addr;
	struct rdma_user_mmap_entry *mmap_rq_cmb;

	struct ib_umem *rq_umem;

	int dcqcn_profile;

	struct ib_ud_header *hdr;
};

struct ionic_ah {
	struct ib_ah ibah;
	u32 ahid;
	int sgid_index;
	struct ib_ud_header hdr;
};

struct ionic_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};

	u32 mrid;
	int flags;

	struct ib_umem *umem;
	struct ionic_tbl_buf buf;
	bool created;
};

struct ionic_counter_stats {
	int queue_stats_count;
	struct ionic_v1_stat *hdr;
	struct rdma_stat_desc *stats_hdrs;
	struct xarray xa_counters;
};

struct ionic_counter {
	void *vals;
	struct list_head qp_list;
};

static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ionic_ibdev, ibdev);
}

static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx)
{
	return container_of(ibctx, struct ionic_ctx, ibctx);
}

static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj)
{
	if (!uobj)
		return NULL;

	if (!uobj->context)
		return NULL;

	return to_ionic_ctx(uobj->context);
}

static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ionic_pd, ibpd);
}

static inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ionic_mr, ibmr);
}

static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct ionic_mr, ibmw);
}

static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ionic_vcq, ibcq);
}

static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq,
					       uint8_t udma_idx)
{
	return &to_ionic_vcq(ibcq)->cq[udma_idx];
}

static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ionic_qp, ibqp);
}

static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ionic_ah, ibah);
}

static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev,
				 struct ionic_ctx *ctx)
{
	if (!ctx)
		return dev->lif_cfg.dbid;

	return ctx->dbid;
}

static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
				 struct ib_uobject *uobj)
{
	return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
}

static inline bool ionic_ibop_is_local(enum ib_wr_opcode op)
{
	return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR;
}

static inline void ionic_qp_complete(struct kref *kref)
{
	struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);

	complete(&qp->qp_rel_comp);
}

static inline void ionic_cq_complete(struct kref *kref)
{
	struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);

	complete(&cq->cq_rel_comp);
}

/* ionic_admin.c */
extern struct workqueue_struct *ionic_evt_workq;
void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr);
int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
		     enum ionic_admin_flags);

int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev);

int ionic_create_rdma_admin(struct ionic_ibdev *dev);
void ionic_destroy_rdma_admin(struct ionic_ibdev *dev);
void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path);

/* ionic_controlpath.c */
int ionic_create_cq_common(struct ionic_vcq *vcq,
			   struct ionic_tbl_buf *buf,
			   const struct ib_cq_init_attr *attr,
			   struct ionic_ctx *ctx,
			   struct ib_udata *udata,
			   struct ionic_qdesc *req_cq,
			   __u32 *resp_cqid,
			   int udma_idx);
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp);
void ionic_notify_flush_cq(struct ionic_cq *cq);

int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata);
void ionic_dealloc_ucontext(struct ib_ucontext *ibctx);
int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma);
void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		    struct ib_udata *udata);
int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int ionic_destroy_ah(struct ib_ah *ibah, u32 flags);
struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access);
struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				u64 addr, int access, struct ib_dmah *dmah,
				struct ib_udata *udata);
struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
				       u64 length, u64 addr, int fd, int access,
				       struct ib_dmah *dmah,
				       struct uverbs_attr_bundle *attrs);
int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
			     u32 max_sg);
int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset);
int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int ionic_dealloc_mw(struct ib_mw *ibmw);
int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct uverbs_attr_bundle *attrs);
int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *udata);
int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		    struct ib_udata *udata);
int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		   struct ib_qp_init_attr *init_attr);
int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);

/* ionic_datapath.c */
int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		    const struct ib_send_wr **bad);
int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		    const struct ib_recv_wr **bad);
int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

/* ionic_hw_stats.c */
void ionic_stats_init(struct ionic_ibdev *dev);
void ionic_stats_cleanup(struct ionic_ibdev *dev);

/* ionic_pgtbl.c */
__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
int ionic_pgtbl_init(struct ionic_ibdev *dev,
		     struct ionic_tbl_buf *buf,
		     struct ib_umem *umem,
		     dma_addr_t dma,
		     int limit,
		     u64 page_size);
void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf);
#endif /* _IONIC_IBDEV_H_ */
drivers/infiniband/hw/ionic/ionic_lif_cfg.c (new file, 111 lines)
@@ -0,0 +1,111 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/kernel.h>

#include <ionic.h>
#include <ionic_lif.h>

#include "ionic_lif_cfg.h"

#define IONIC_MIN_RDMA_VERSION 0
#define IONIC_MAX_RDMA_VERSION 2

static u8 ionic_get_expdb(struct ionic_lif *lif)
{
	u8 expdb_support = 0;

	if (lif->ionic->idev.phy_cmb_expdb64_pages)
		expdb_support |= IONIC_EXPDB_64B_WQE;
	if (lif->ionic->idev.phy_cmb_expdb128_pages)
		expdb_support |= IONIC_EXPDB_128B_WQE;
	if (lif->ionic->idev.phy_cmb_expdb256_pages)
		expdb_support |= IONIC_EXPDB_256B_WQE;
	if (lif->ionic->idev.phy_cmb_expdb512_pages)
		expdb_support |= IONIC_EXPDB_512B_WQE;

	return expdb_support;
}

void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg)
{
	union ionic_lif_identity *ident = &lif->ionic->ident.lif;

	cfg->lif = lif;
	cfg->hwdev = &lif->ionic->pdev->dev;
	cfg->lif_index = lif->index;
	cfg->lif_hw_index = lif->hw_index;

	cfg->dbid = lif->kern_pid;
	cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	cfg->dbpage = lif->kern_dbpage;
	cfg->intr_ctrl = lif->ionic->idev.intr_ctrl;

	cfg->db_phys = lif->ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr;

	if (IONIC_VERSION(ident->rdma.version, ident->rdma.minor_version) >=
	    IONIC_VERSION(2, 1))
		cfg->page_size_supported =
			le64_to_cpu(ident->rdma.page_size_cap);
	else
		cfg->page_size_supported = IONIC_PAGE_SIZE_SUPPORTED;

	cfg->rdma_version = ident->rdma.version;
	cfg->qp_opcodes = ident->rdma.qp_opcodes;
	cfg->admin_opcodes = ident->rdma.admin_opcodes;

	cfg->stats_type = le16_to_cpu(ident->rdma.stats_type);
	cfg->npts_per_lif = le32_to_cpu(ident->rdma.npts_per_lif);
	cfg->nmrs_per_lif = le32_to_cpu(ident->rdma.nmrs_per_lif);
	cfg->nahs_per_lif = le32_to_cpu(ident->rdma.nahs_per_lif);

	cfg->aq_base = le32_to_cpu(ident->rdma.aq_qtype.qid_base);
	cfg->cq_base = le32_to_cpu(ident->rdma.cq_qtype.qid_base);
	cfg->eq_base = le32_to_cpu(ident->rdma.eq_qtype.qid_base);

	/*
	 * ionic_create_rdma_admin() may reduce aq_count or eq_count if
	 * it is unable to allocate all that were requested.
	 * aq_count is tunable; see ionic_aq_count
	 * eq_count is tunable; see ionic_eq_count
	 */
	cfg->aq_count = le32_to_cpu(ident->rdma.aq_qtype.qid_count);
	cfg->eq_count = le32_to_cpu(ident->rdma.eq_qtype.qid_count);
	cfg->cq_count = le32_to_cpu(ident->rdma.cq_qtype.qid_count);
	cfg->qp_count = le32_to_cpu(ident->rdma.sq_qtype.qid_count);
	cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);

	cfg->aq_qtype = ident->rdma.aq_qtype.qtype;
	cfg->sq_qtype = ident->rdma.sq_qtype.qtype;
	cfg->rq_qtype = ident->rdma.rq_qtype.qtype;
	cfg->cq_qtype = ident->rdma.cq_qtype.qtype;
	cfg->eq_qtype = ident->rdma.eq_qtype.qtype;
	cfg->udma_qgrp_shift = ident->rdma.udma_shift;
	cfg->udma_count = 2;

	cfg->max_stride = ident->rdma.max_stride;
	cfg->expdb_mask = ionic_get_expdb(lif);

	cfg->sq_expdb =
		!!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EXPDB);
	cfg->rq_expdb =
		!!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EXPDB);
}

struct net_device *ionic_lif_netdev(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;

	dev_hold(netdev);
	return netdev;
}

void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len)
{
	strscpy(str, lif->ionic->idev.dev_info.fw_version, len);
}

u8 ionic_lif_asic_rev(struct ionic_lif *lif)
{
	return lif->ionic->idev.dev_info.asic_rev;
}
drivers/infiniband/hw/ionic/ionic_lif_cfg.h (new file, 66 lines)
@@ -0,0 +1,66 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#ifndef _IONIC_LIF_CFG_H_
#define _IONIC_LIF_CFG_H_

#define IONIC_VERSION(a, b) (((a) << 16) + ((b) << 8))
#define IONIC_PAGE_SIZE_SUPPORTED 0x40201000 /* 4kb, 2Mb, 1Gb */

#define IONIC_EXPDB_64B_WQE BIT(0)
#define IONIC_EXPDB_128B_WQE BIT(1)
#define IONIC_EXPDB_256B_WQE BIT(2)
#define IONIC_EXPDB_512B_WQE BIT(3)

struct ionic_lif_cfg {
	struct device *hwdev;
	struct ionic_lif *lif;

	int lif_index;
	int lif_hw_index;

	u32 dbid;
	int dbid_count;
	u64 __iomem *dbpage;
	struct ionic_intr __iomem *intr_ctrl;
	phys_addr_t db_phys;

	u64 page_size_supported;
	u32 npts_per_lif;
	u32 nmrs_per_lif;
	u32 nahs_per_lif;

	u32 aq_base;
	u32 cq_base;
	u32 eq_base;

	int aq_count;
	int eq_count;
	int cq_count;
	int qp_count;

	u16 stats_type;
	u8 aq_qtype;
	u8 sq_qtype;
	u8 rq_qtype;
	u8 cq_qtype;
	u8 eq_qtype;

	u8 udma_count;
	u8 udma_qgrp_shift;

	u8 rdma_version;
	u8 qp_opcodes;
	u8 admin_opcodes;

	u8 max_stride;
	bool sq_expdb;
	bool rq_expdb;
	u8 expdb_mask;
};

void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg);
struct net_device *ionic_lif_netdev(struct ionic_lif *lif);
void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len);
u8 ionic_lif_asic_rev(struct ionic_lif *lif);

#endif /* _IONIC_LIF_CFG_H_ */
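IONIC_VERSION() packs major and minor into a single integer so firmware capability gates reduce to one comparison, as ionic_fill_lif_cfg() does when deciding whether the device reports its own page-size capability mask. A trivial illustration with a hypothetical helper:

static bool demo_fw_has_page_size_cap(u8 major, u8 minor)
{
	/* rdma interface 2.1 and later report their own page_size_cap mask */
	return IONIC_VERSION(major, minor) >= IONIC_VERSION(2, 1);
}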
drivers/infiniband/hw/ionic/ionic_pgtbl.c (new file, 143 lines)
@@ -0,0 +1,143 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
|
||||
|
||||
#include <linux/mman.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include "ionic_fw.h"
|
||||
#include "ionic_ibdev.h"
|
||||
|
||||
__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
|
||||
{
|
||||
u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
|
||||
u64 dma;
|
||||
|
||||
if (!buf->tbl_pages)
|
||||
return cpu_to_le64(0);
|
||||
|
||||
if (buf->tbl_pages > 1)
|
||||
return cpu_to_le64(buf->tbl_dma);
|
||||
|
||||
if (buf->tbl_buf)
|
||||
dma = le64_to_cpu(buf->tbl_buf[0]);
|
||||
else
|
||||
dma = buf->tbl_dma;
|
||||
|
||||
return cpu_to_le64(dma + (va & pg_mask));
|
||||
}
|
||||
|
||||
__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va)
|
||||
{
|
||||
if (buf->tbl_pages > 1) {
|
||||
u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
|
||||
|
||||
return cpu_to_be64(va & pg_mask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
|
||||
{
|
||||
if (unlikely(buf->tbl_pages == buf->tbl_limit))
|
||||
return -ENOMEM;
|
||||
|
||||
if (buf->tbl_buf)
|
||||
buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma);
|
||||
else
|
||||
buf->tbl_dma = dma;
|
||||
|
||||
++buf->tbl_pages;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev,
|
||||
struct ionic_tbl_buf *buf)
|
||||
{
|
||||
int rc;
|
||||
|
||||
buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf);
|
||||
buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL);
|
||||
if (!buf->tbl_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf,
|
||||
buf->tbl_size, DMA_TO_DEVICE);
|
||||
rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma);
|
||||
if (rc) {
|
||||
kfree(buf->tbl_buf);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem)
|
||||
{
|
||||
struct ib_block_iter biter;
|
||||
u64 page_dma;
|
||||
int rc;
|
||||
|
||||
rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) {
|
||||
page_dma = rdma_block_iter_dma_address(&biter);
|
||||
rc = ionic_pgtbl_page(buf, page_dma);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf)
|
||||
{
|
||||
if (buf->tbl_buf)
|
||||
dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma,
|
||||
buf->tbl_size, DMA_TO_DEVICE);
|
||||
|
||||
kfree(buf->tbl_buf);
|
||||
memset(buf, 0, sizeof(*buf));
|
||||
}
|
||||
|
||||
int ionic_pgtbl_init(struct ionic_ibdev *dev,
|
||||
struct ionic_tbl_buf *buf,
|
||||
struct ib_umem *umem,
|
||||
dma_addr_t dma,
|
||||
int limit,
|
||||
u64 page_size)
|
||||
{
|
||||
int rc;
|
||||
|
||||
memset(buf, 0, sizeof(*buf));
|
||||
|
||||
if (umem) {
|
||||
limit = ib_umem_num_dma_blocks(umem, page_size);
|
||||
buf->page_size_log2 = order_base_2(page_size);
|
||||
}
|
||||
|
||||
if (limit < 1)
|
||||
return -EINVAL;
|
||||
|
||||
buf->tbl_limit = limit;
|
||||
|
||||
/* skip pgtbl if contiguous / direct translation */
|
||||
if (limit > 1) {
|
||||
rc = ionic_tbl_buf_alloc(dev, buf);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (umem)
|
||||
rc = ionic_pgtbl_umem(buf, umem);
|
||||
else
|
||||
rc = ionic_pgtbl_page(buf, dma);
|
||||
|
||||
if (rc)
|
||||
goto err_unbuf;
|
||||
|
||||
return 0;
|
||||
|
||||
err_unbuf:
|
||||
ionic_pgtbl_unbuf(dev, buf);
|
||||
return rc;
|
||||
}
|
||||
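
Aside: the page-table buffer above has three shapes. Zero pages map to a null device address, a single page is handed to the device directly (plus the offset of the VA within the page), and multiple pages go through a DMA-mapped table of little-endian entries. A minimal caller sketch under stated assumptions: "dev", "umem", and "va" come from a surrounding MR registration path, and "cmd.va_dma" is a hypothetical admin-command field, not from this patch.

	struct ionic_tbl_buf buf;
	int rc;

	/* build the translation for the user region at its native page size */
	rc = ionic_pgtbl_init(dev, &buf, umem, 0, 1, PAGE_SIZE);
	if (rc)
		return rc;

	/* base address for the device: page dma plus va offset in the direct
	 * (single page) case, or the dma address of the table buffer itself
	 */
	cmd.va_dma = ionic_pgtbl_dma(&buf, va);

	/* ... post the admin command, then release the table buffer ... */
	ionic_pgtbl_unbuf(dev, &buf);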
52
drivers/infiniband/hw/ionic/ionic_queue.c
Normal file
@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/dma-mapping.h>

#include "ionic_queue.h"

int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
		     int depth, size_t stride)
{
	if (depth < 0 || depth > 0xffff)
		return -EINVAL;

	if (stride == 0 || stride > 0x10000)
		return -EINVAL;

	if (depth == 0)
		depth = 1;

	q->depth_log2 = order_base_2(depth + 1);
	q->stride_log2 = order_base_2(stride);

	if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
		q->depth_log2 = PAGE_SHIFT - q->stride_log2;

	if (q->depth_log2 > 16 || q->stride_log2 > 16)
		return -EINVAL;

	q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
	q->mask = BIT(q->depth_log2) - 1;

	q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
	if (!q->ptr)
		return -ENOMEM;

	/* it will always be page aligned, but just to be sure... */
	if (!PAGE_ALIGNED(q->ptr)) {
		dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
		return -ENOMEM;
	}

	q->prod = 0;
	q->cons = 0;
	q->dbell = 0;

	return 0;
}

void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
{
	dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
}
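
Aside: the sizing rules above round the ring to a power of two with one reserved "hole" slot (hence order_base_2(depth + 1)), and pad small rings up to at least one page. A worked example of the resulting geometry, assuming 4 KiB pages and a caller-provided "dma_dev":

	struct ionic_queue q;
	int rc;

	/* 255 usable entries of 64 bytes: 2^8 slots of 2^6 bytes = 16 KiB */
	rc = ionic_queue_init(&q, dma_dev, 255, 64);
	if (rc)
		return rc;

	/* q.depth_log2 == 8, q.stride_log2 == 6, q.size == 16384, q.mask == 0xff */

	ionic_queue_destroy(&q, dma_dev);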
234
drivers/infiniband/hw/ionic/ionic_queue.h
Normal file
@@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#ifndef _IONIC_QUEUE_H_
#define _IONIC_QUEUE_H_

#include <linux/io.h>
#include <ionic_regs.h>

#define IONIC_MAX_DEPTH		0xffff
#define IONIC_MAX_CQ_DEPTH	0xffff
#define IONIC_CQ_RING_ARM	IONIC_DBELL_RING_1
#define IONIC_CQ_RING_SOL	IONIC_DBELL_RING_2

/**
 * struct ionic_queue - Ring buffer used between device and driver
 * @size:	Size of the buffer, in bytes
 * @dma:	Dma address of the buffer
 * @ptr:	Buffer virtual address
 * @prod:	Driver position in the queue
 * @cons:	Device position in the queue
 * @mask:	Capacity of the queue, subtracting the hole
 *		This value is equal to ((1 << depth_log2) - 1)
 * @depth_log2:	Log base two size depth of the queue
 * @stride_log2: Log base two size of an element in the queue
 * @dbell:	Doorbell identifying bits
 */
struct ionic_queue {
	size_t size;
	dma_addr_t dma;
	void *ptr;
	u16 prod;
	u16 cons;
	u16 mask;
	u8 depth_log2;
	u8 stride_log2;
	u64 dbell;
};

/**
 * ionic_queue_init() - Initialize user space queue
 * @q:		Uninitialized queue structure
 * @dma_dev:	DMA device for mapping
 * @depth:	Depth of the queue
 * @stride:	Size of each element of the queue
 *
 * Return: status code
 */
int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
		     int depth, size_t stride);

/**
 * ionic_queue_destroy() - Destroy user space queue
 * @q:		Queue structure
 * @dma_dev:	DMA device for mapping
 */
void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);

/**
 * ionic_queue_empty() - Test if queue is empty
 * @q:		Queue structure
 *
 * This is only valid for to-device queues.
 *
 * Return: is empty
 */
static inline bool ionic_queue_empty(struct ionic_queue *q)
{
	return q->prod == q->cons;
}

/**
 * ionic_queue_length() - Get the current length of the queue
 * @q:		Queue structure
 *
 * This is only valid for to-device queues.
 *
 * Return: length
 */
static inline u16 ionic_queue_length(struct ionic_queue *q)
{
	return (q->prod - q->cons) & q->mask;
}

/**
 * ionic_queue_length_remaining() - Get the remaining length of the queue
 * @q:		Queue structure
 *
 * This is only valid for to-device queues.
 *
 * Return: length remaining
 */
static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
{
	return q->mask - ionic_queue_length(q);
}

/**
 * ionic_queue_full() - Test if queue is full
 * @q:		Queue structure
 *
 * This is only valid for to-device queues.
 *
 * Return: is full
 */
static inline bool ionic_queue_full(struct ionic_queue *q)
{
	return q->mask == ionic_queue_length(q);
}

/**
 * ionic_color_wrap() - Flip the color if prod is wrapped
 * @prod:	Queue index just after advancing
 * @color:	Queue color just prior to advancing the index
 *
 * Return: color after advancing the index
 */
static inline bool ionic_color_wrap(u16 prod, bool color)
{
	/* logical xor color with (prod == 0) */
	return color != (prod == 0);
}

/**
 * ionic_queue_at() - Get the element at the given index
 * @q:		Queue structure
 * @idx:	Index in the queue
 *
 * The index must be within the bounds of the queue. It is not checked here.
 *
 * Return: pointer to element at index
 */
static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
{
	return q->ptr + ((unsigned long)idx << q->stride_log2);
}

/**
 * ionic_queue_at_prod() - Get the element at the producer index
 * @q:		Queue structure
 *
 * Return: pointer to element at producer index
 */
static inline void *ionic_queue_at_prod(struct ionic_queue *q)
{
	return ionic_queue_at(q, q->prod);
}

/**
 * ionic_queue_at_cons() - Get the element at the consumer index
 * @q:		Queue structure
 *
 * Return: pointer to element at consumer index
 */
static inline void *ionic_queue_at_cons(struct ionic_queue *q)
{
	return ionic_queue_at(q, q->cons);
}

/**
 * ionic_queue_next() - Compute the next index
 * @q:		Queue structure
 * @idx:	Index
 *
 * Return: next index after idx
 */
static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
{
	return (idx + 1) & q->mask;
}

/**
 * ionic_queue_produce() - Increase the producer index
 * @q:		Queue structure
 *
 * Caller must ensure that the queue is not full. It is not checked here.
 */
static inline void ionic_queue_produce(struct ionic_queue *q)
{
	q->prod = ionic_queue_next(q, q->prod);
}

/**
 * ionic_queue_consume() - Increase the consumer index
 * @q:		Queue structure
 *
 * Caller must ensure that the queue is not empty. It is not checked here.
 *
 * This is only valid for to-device queues.
 */
static inline void ionic_queue_consume(struct ionic_queue *q)
{
	q->cons = ionic_queue_next(q, q->cons);
}

/**
 * ionic_queue_consume_entries() - Increase the consumer index by entries
 * @q:		Queue structure
 * @entries:	Number of entries to increment
 *
 * Caller must ensure that the queue is not empty. It is not checked here.
 *
 * This is only valid for to-device queues.
 */
static inline void ionic_queue_consume_entries(struct ionic_queue *q,
					       u16 entries)
{
	q->cons = (q->cons + entries) & q->mask;
}

/**
 * ionic_queue_dbell_init() - Initialize doorbell bits for queue id
 * @q:		Queue structure
 * @qid:	Queue identifying number
 */
static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
{
	q->dbell = IONIC_DBELL_QID(qid);
}

/**
 * ionic_queue_dbell_val() - Get current doorbell update value
 * @q:		Queue structure
 *
 * Return: current doorbell update value
 */
static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
{
	return q->dbell | q->prod;
}

#endif /* _IONIC_QUEUE_H_ */
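
Aside: a minimal post-and-ring sketch using the helpers above. The WQE buffer "wqe" and the mapped doorbell register "dbreg" are placeholders for driver specifics, not names from this patch.

	if (ionic_queue_full(&q))
		return -ENOSPC;

	/* copy one stride-sized WQE into the slot at the producer index */
	memcpy(ionic_queue_at_prod(&q), wqe, BIT(q.stride_log2));
	ionic_queue_produce(&q);

	/* the doorbell value encodes the queue id (set by
	 * ionic_queue_dbell_init) in the high bits and the new
	 * producer index in the low bits
	 */
	writeq(ionic_queue_dbell_val(&q), dbreg);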
154
drivers/infiniband/hw/ionic/ionic_res.h
Normal file
@@ -0,0 +1,154 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#ifndef _IONIC_RES_H_
#define _IONIC_RES_H_

#include <linux/kernel.h>
#include <linux/idr.h>

/**
 * struct ionic_resid_bits - Number allocator based on IDA
 *
 * @inuse:	IDA handle
 * @inuse_size:	Highest ID limit for IDA
 */
struct ionic_resid_bits {
	struct ida inuse;
	unsigned int inuse_size;
};

/**
 * ionic_resid_init() - Initialize a resid allocator
 * @resid:	Uninitialized resid allocator
 * @size:	Capacity of the allocator
 */
static inline void ionic_resid_init(struct ionic_resid_bits *resid,
				    unsigned int size)
{
	resid->inuse_size = size;
	ida_init(&resid->inuse);
}

/**
 * ionic_resid_destroy() - Destroy a resid allocator
 * @resid:	Resid allocator
 */
static inline void ionic_resid_destroy(struct ionic_resid_bits *resid)
{
	ida_destroy(&resid->inuse);
}

/**
 * ionic_resid_get_shared() - Allocate an available shared resource id
 * @resid:	Resid allocator
 * @min:	Smallest valid resource id
 * @size:	One after largest valid resource id
 *
 * Return: Resource id, or negative error number
 */
static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid,
					 unsigned int min,
					 unsigned int size)
{
	return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL);
}

/**
 * ionic_resid_get() - Allocate an available resource id
 * @resid:	Resid allocator
 *
 * Return: Resource id, or negative error number
 */
static inline int ionic_resid_get(struct ionic_resid_bits *resid)
{
	return ionic_resid_get_shared(resid, 0, resid->inuse_size);
}

/**
 * ionic_resid_put() - Free a resource id
 * @resid:	Resid allocator
 * @id:		Resource id
 */
static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id)
{
	ida_free(&resid->inuse, id);
}

/**
 * ionic_bitid_to_qid() - Transform a resource bit index into a queue id
 * @bitid:	Bit index
 * @qgrp_shift:	Log2 number of queues per queue group
 * @half_qid_shift: Log2 of half the total number of queues
 *
 * Return: Queue id
 *
 * Udma-constrained queues (QPs and CQs) are associated with their udma by
 * queue group. Even queue groups are associated with udma0, and odd queue
 * groups with udma1.
 *
 * For allocating queue ids, we want to arrange the bits into two halves,
 * with the even queue groups of udma0 in the lower half of the bitset,
 * and the odd queue groups of udma1 in the upper half of the bitset.
 * Then, one or two calls of find_next_zero_bit can examine all the bits
 * for queues of an entire udma.
 *
 * For example, assuming eight queue groups with qgrp qids per group:
 *
 * bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1
 * bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1
 * bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1
 * bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1
 * bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1
 * bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1
 * bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1
 * bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1
 *
 * There are three important ranges of bits in the qid. There is the udma
 * bit "U" at qgrp_shift, which is the least significant bit of the group
 * index, and determines which udma a queue is associated with.
 * The bits of lesser significance we can call the idx bits "I", which are
 * the index of the queue within the group. The bits of greater significance
 * we can call the grp bits "G", which are other bits of the group index that
 * do not determine the udma. Those bits are just rearranged in the bit index
 * in the bitset. A bitid has the udma bit in the most significant place,
 * then the grp bits, then the idx bits.
 *
 * bitid: 00000000000000 U GGG IIIIII
 * qid:   00000000000000 GGG U IIIIII
 *
 * Transforming from bit index to qid, or from qid to bit index, can be
 * accomplished by rearranging the bits by masking and shifting.
 */
static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift,
				     u8 half_qid_shift)
{
	u32 udma_bit =
		(bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
	u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
	u32 idx_bits = bitid & (BIT(qgrp_shift) - 1);

	return grp_bits | udma_bit | idx_bits;
}

/**
 * ionic_qid_to_bitid() - Transform a queue id into a resource bit index
 * @qid:	queue index
 * @qgrp_shift:	Log2 number of queues per queue group
 * @half_qid_shift: Log2 of half the total number of queues
 *
 * Return: Resource bit index
 *
 * This is the inverse of ionic_bitid_to_qid().
 */
static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
{
	u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
	u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
	u32 idx_bits = qid & (BIT(qgrp_shift) - 1);

	return udma_bit | grp_bits | idx_bits;
}
#endif /* _IONIC_RES_H_ */
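
Aside: a worked instance of the bit rearrangement documented above, assuming 64 queues per group (qgrp_shift = 6) and 1024 queues total (half_qid_shift = 9). Bit index 582 = 0b10_0100_0110 sits in the upper (udma1) half of the bitset, so its udma bit moves down next to the idx bits:

	/* bitid 582: U=1, G=0b01, I=6 -> qid 0b011'000110 = 198 */
	u32 qid = ionic_bitid_to_qid(582, 6, 9);   /* = 198: group 3 (odd, udma1), idx 6 */
	u32 bitid = ionic_qid_to_bitid(qid, 6, 9); /* = 582, round-trips exactly */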
@@ -4,10 +4,11 @@ config INFINIBAND_IRDMA
 	depends on INET
 	depends on IPV6 || !IPV6
 	depends on PCI
-	depends on ICE && I40E
+	depends on IDPF && ICE && I40E
 	select GENERIC_ALLOCATOR
 	select AUXILIARY_BUS
 	select CRC32
 	help
-	  This is an Intel(R) Ethernet Protocol Driver for RDMA driver
-	  that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
+	  This is an Intel(R) Ethernet Protocol Driver for RDMA that
+	  supports IPU E2000 (RoCEv2), E810 (iWARP/RoCEv2) and X722 (iWARP)
+	  network devices.

@@ -13,7 +13,10 @@ irdma-objs := cm.o \
 	hw.o \
 	i40iw_hw.o \
 	i40iw_if.o \
+	ig3rdma_if.o\
+	icrdma_if.o \
 	icrdma_hw.o \
+	ig3rdma_hw.o\
 	main.o \
 	pble.o \
 	puda.o \
@@ -22,6 +25,7 @@ irdma-objs := cm.o \
 	uk.o \
 	utils.o \
 	verbs.o \
+	virtchnl.o \
 	ws.o \
 
 CFLAGS_trace.o = -I$(src)
File diff suppressed because it is too large
@@ -14,6 +14,18 @@
|
||||
#define IRDMA_PE_DB_SIZE_4M 1
|
||||
#define IRDMA_PE_DB_SIZE_8M 2
|
||||
|
||||
#define IRDMA_IRD_HW_SIZE_4_GEN3 0
|
||||
#define IRDMA_IRD_HW_SIZE_8_GEN3 1
|
||||
#define IRDMA_IRD_HW_SIZE_16_GEN3 2
|
||||
#define IRDMA_IRD_HW_SIZE_32_GEN3 3
|
||||
#define IRDMA_IRD_HW_SIZE_64_GEN3 4
|
||||
#define IRDMA_IRD_HW_SIZE_128_GEN3 5
|
||||
#define IRDMA_IRD_HW_SIZE_256_GEN3 6
|
||||
#define IRDMA_IRD_HW_SIZE_512_GEN3 7
|
||||
#define IRDMA_IRD_HW_SIZE_1024_GEN3 8
|
||||
#define IRDMA_IRD_HW_SIZE_2048_GEN3 9
|
||||
#define IRDMA_IRD_HW_SIZE_4096_GEN3 10
|
||||
|
||||
#define IRDMA_IRD_HW_SIZE_4 0
|
||||
#define IRDMA_IRD_HW_SIZE_16 1
|
||||
#define IRDMA_IRD_HW_SIZE_64 2
|
||||
@@ -114,6 +126,13 @@ enum irdma_protocol_used {
|
||||
#define IRDMA_UPDATE_SD_BUFF_SIZE 128
|
||||
#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
|
||||
|
||||
#define ENABLE_LOC_MEM 63
|
||||
#define IRDMA_ATOMICS_ALLOWED_BIT 1
|
||||
#define MAX_PBLE_PER_SD 0x40000
|
||||
#define MAX_PBLE_SD_PER_FCN 0x400
|
||||
#define MAX_MR_PER_SD 0x8000
|
||||
#define MAX_MR_SD_PER_FCN 0x80
|
||||
#define IRDMA_PBLE_COMMIT_OFFSET 112
|
||||
#define IRDMA_MAX_QUANTA_PER_WR 8
|
||||
|
||||
#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
|
||||
@@ -121,6 +140,10 @@ enum irdma_protocol_used {
|
||||
#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
|
||||
#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
|
||||
((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
|
||||
#define IRDMA_SRQ_MIN_QUANTA 8
|
||||
#define IRDMA_SRQ_MAX_QUANTA 262144
|
||||
#define IRDMA_MAX_SRQ_WRS \
|
||||
((IRDMA_SRQ_MAX_QUANTA - IRDMA_RQ_RSVD) / IRDMA_MAX_QUANTA_PER_WR)
|
||||
|
||||
#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
|
||||
#define IRDMAQP_TERM_SEND_TERM_ONLY 1
|
||||
@@ -147,8 +170,13 @@ enum irdma_protocol_used {
|
||||
#define IRDMA_SQ_RSVD 258
|
||||
#define IRDMA_RQ_RSVD 1
|
||||
|
||||
#define IRDMA_FEATURE_RTS_AE 1ULL
|
||||
#define IRDMA_FEATURE_CQ_RESIZE 2ULL
|
||||
#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
|
||||
#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
|
||||
#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
|
||||
#define IRDMA_FEATURE_ATOMIC_OPS BIT_ULL(6)
|
||||
#define IRDMA_FEATURE_SRQ BIT_ULL(7)
|
||||
#define IRDMA_FEATURE_CQE_TIMESTAMPING BIT_ULL(8)
|
||||
|
||||
#define IRDMAQP_OP_RDMA_WRITE 0x00
|
||||
#define IRDMAQP_OP_RDMA_READ 0x01
|
||||
#define IRDMAQP_OP_RDMA_SEND 0x03
|
||||
@@ -161,6 +189,8 @@ enum irdma_protocol_used {
|
||||
#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
|
||||
#define IRDMAQP_OP_NOP 0x0c
|
||||
#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
|
||||
#define IRDMAQP_OP_ATOMIC_FETCH_ADD 0x0f
|
||||
#define IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD 0x11
|
||||
#define IRDMAQP_OP_GEN_RTS_AE 0x30
|
||||
|
||||
enum irdma_cqp_op_type {
|
||||
@@ -212,9 +242,12 @@ enum irdma_cqp_op_type {
|
||||
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
|
||||
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
|
||||
IRDMA_OP_CQ_MODIFY = 48,
|
||||
IRDMA_OP_SRQ_CREATE = 49,
|
||||
IRDMA_OP_SRQ_MODIFY = 50,
|
||||
IRDMA_OP_SRQ_DESTROY = 51,
|
||||
|
||||
/* Must be last entry*/
|
||||
IRDMA_MAX_CQP_OPS = 49,
|
||||
IRDMA_MAX_CQP_OPS = 52,
|
||||
};
|
||||
|
||||
/* CQP SQ WQES */
|
||||
@@ -224,6 +257,9 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQP_OP_CREATE_CQ 0x03
|
||||
#define IRDMA_CQP_OP_MODIFY_CQ 0x04
|
||||
#define IRDMA_CQP_OP_DESTROY_CQ 0x05
|
||||
#define IRDMA_CQP_OP_CREATE_SRQ 0x06
|
||||
#define IRDMA_CQP_OP_MODIFY_SRQ 0x07
|
||||
#define IRDMA_CQP_OP_DESTROY_SRQ 0x08
|
||||
#define IRDMA_CQP_OP_ALLOC_STAG 0x09
|
||||
#define IRDMA_CQP_OP_REG_MR 0x0a
|
||||
#define IRDMA_CQP_OP_QUERY_STAG 0x0b
|
||||
@@ -265,97 +301,6 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQP_OP_GATHER_STATS 0x2e
|
||||
#define IRDMA_CQP_OP_UP_MAP 0x2f
|
||||
|
||||
/* Async Events codes */
|
||||
#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
|
||||
#define IRDMA_AE_AMP_INVALID_STAG 0x0103
|
||||
#define IRDMA_AE_AMP_BAD_QP 0x0104
|
||||
#define IRDMA_AE_AMP_BAD_PD 0x0105
|
||||
#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
|
||||
#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
|
||||
#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
|
||||
#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
|
||||
#define IRDMA_AE_AMP_TO_WRAP 0x010a
|
||||
#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
|
||||
#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
|
||||
#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
|
||||
#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
|
||||
#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
|
||||
#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
|
||||
#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
|
||||
#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
|
||||
#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
|
||||
#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
|
||||
#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
|
||||
#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
|
||||
#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
|
||||
#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
|
||||
#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
|
||||
#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
|
||||
#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
|
||||
#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
|
||||
#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
|
||||
#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
|
||||
#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
|
||||
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
|
||||
#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
|
||||
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
|
||||
#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
|
||||
#define IRDMA_AE_BAD_CLOSE 0x0201
|
||||
#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
|
||||
#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
|
||||
#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
|
||||
#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
|
||||
#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
|
||||
#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
|
||||
#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
|
||||
#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
|
||||
#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
|
||||
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
|
||||
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
|
||||
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
|
||||
#define IRDMA_AE_INVALID_REQUEST 0x0223
|
||||
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
|
||||
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
|
||||
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
|
||||
#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
|
||||
#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
|
||||
#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
|
||||
#define IRDMA_AE_DDP_NO_L_BIT 0x0308
|
||||
#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
|
||||
#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
|
||||
#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
|
||||
#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
|
||||
#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
|
||||
#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
|
||||
#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
|
||||
#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
|
||||
#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
|
||||
#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
|
||||
#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
|
||||
#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
|
||||
#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
|
||||
#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
|
||||
#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
|
||||
#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
|
||||
#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
|
||||
#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
|
||||
#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
|
||||
#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
|
||||
#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
|
||||
#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
|
||||
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
|
||||
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
|
||||
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
|
||||
#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
|
||||
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
|
||||
#define IRDMA_AE_RESET_SENT 0x0601
|
||||
#define IRDMA_AE_TERMINATE_SENT 0x0602
|
||||
#define IRDMA_AE_RESET_NOT_SENT 0x0603
|
||||
#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
|
||||
#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
|
||||
#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
|
||||
#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
|
||||
|
||||
#define FLD_LS_64(dev, val, field) \
|
||||
(((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
|
||||
#define FLD_RS_64(dev, val, field) \
|
||||
@@ -393,9 +338,13 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
|
||||
#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
|
||||
#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
|
||||
#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0)
|
||||
#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
|
||||
#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52)
|
||||
#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(55, 52)
|
||||
#define IRDMA_SD_MAX GENMASK_ULL(15, 0)
|
||||
#define IRDMA_MEM_MAX GENMASK_ULL(15, 0)
|
||||
#define IRDMA_QP_MEM_LOC GENMASK_ULL(47, 44)
|
||||
#define IRDMA_MR_MEM_LOC GENMASK_ULL(27, 24)
|
||||
|
||||
#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
|
||||
#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
|
||||
@@ -404,16 +353,16 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
|
||||
#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
|
||||
#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
|
||||
#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
|
||||
#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
|
||||
#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
|
||||
#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(29, 16)
|
||||
#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(13, 0)
|
||||
#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(63, 48)
|
||||
#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
|
||||
|
||||
#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
|
||||
#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
|
||||
#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
|
||||
#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
|
||||
#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
|
||||
#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
|
||||
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
|
||||
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
|
||||
@@ -448,6 +397,16 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
|
||||
#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
|
||||
|
||||
#define IRDMA_CQPHC_TIMESTAMP_OVERRIDE BIT_ULL(5)
|
||||
#define IRDMA_CQPHC_TS_SHIFT GENMASK_ULL(12, 8)
|
||||
#define IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS BIT_ULL(0)
|
||||
|
||||
#define IRDMA_CQPHC_OOISC_BLKSIZE GENMASK_ULL(63, 60)
|
||||
#define IRDMA_CQPHC_RRSP_BLKSIZE GENMASK_ULL(59, 56)
|
||||
#define IRDMA_CQPHC_Q1_BLKSIZE GENMASK_ULL(55, 52)
|
||||
#define IRDMA_CQPHC_XMIT_BLKSIZE GENMASK_ULL(51, 48)
|
||||
#define IRDMA_CQPHC_BLKSIZES_VALID BIT_ULL(4)
|
||||
|
||||
#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
|
||||
#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
|
||||
#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
|
||||
@@ -461,6 +420,8 @@ enum irdma_cqp_op_type {
|
||||
|
||||
#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
|
||||
|
||||
#define IRDMA_CCQ_DEFINFO GENMASK_ULL(63, 32)
|
||||
|
||||
#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
|
||||
#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
|
||||
@@ -469,6 +430,7 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQ_ERROR BIT_ULL(55)
|
||||
#define IRDMA_CQ_SQ BIT_ULL(62)
|
||||
|
||||
#define IRDMA_CQ_SRQ BIT_ULL(52)
|
||||
#define IRDMA_CQ_VALID BIT_ULL(63)
|
||||
#define IRDMA_CQ_IMMVALID BIT_ULL(62)
|
||||
#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
|
||||
@@ -476,8 +438,6 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
|
||||
#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
|
||||
|
||||
#define IRDMA_CQ_IMMDATA_S 0
|
||||
#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
|
||||
#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
|
||||
#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
|
||||
#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
|
||||
@@ -508,6 +468,17 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
|
||||
#define IRDMA_AEQE_VALID BIT_ULL(63)
|
||||
|
||||
#define IRDMA_AEQE_Q2DATA_GEN_3 GENMASK_ULL(5, 4)
|
||||
#define IRDMA_AEQE_TCPSTATE_GEN_3 GENMASK_ULL(3, 0)
|
||||
#define IRDMA_AEQE_QPCQID_GEN_3 GENMASK_ULL(24, 0)
|
||||
#define IRDMA_AEQE_AECODE_GEN_3 GENMASK_ULL(61, 50)
|
||||
#define IRDMA_AEQE_OVERFLOW_GEN_3 BIT_ULL(62)
|
||||
#define IRDMA_AEQE_WQDESCIDX_GEN_3 GENMASK_ULL(49, 32)
|
||||
#define IRDMA_AEQE_IWSTATE_GEN_3 GENMASK_ULL(31, 29)
|
||||
#define IRDMA_AEQE_AESRC_GEN_3 GENMASK_ULL(28, 25)
|
||||
#define IRDMA_AEQE_CMPL_CTXT_S 6
|
||||
#define IRDMA_AEQE_CMPL_CTXT GENMASK_ULL(63, 6)
|
||||
|
||||
#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
|
||||
#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
|
||||
#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
|
||||
@@ -530,11 +501,14 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
|
||||
#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
|
||||
|
||||
#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
|
||||
#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(23, 8)
|
||||
#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
|
||||
|
||||
#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
|
||||
|
||||
#define IRDMA_CQPSQ_PASID GENMASK_ULL(51, 32)
|
||||
#define IRDMA_CQPSQ_PASID_VALID BIT_ULL(62)
|
||||
|
||||
/* Create/Modify/Destroy QP */
|
||||
|
||||
#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
|
||||
@@ -566,10 +540,30 @@ enum irdma_cqp_op_type {
|
||||
|
||||
#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
|
||||
|
||||
#define IRDMA_CQPSQ_SRQ_RQSIZE GENMASK_ULL(3, 0)
|
||||
#define IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE GENMASK_ULL(5, 4)
|
||||
#define IRDMA_CQPSQ_SRQ_SRQ_LIMIT GENMASK_ULL(43, 32)
|
||||
#define IRDMA_CQPSQ_SRQ_SRQCTX GENMASK_ULL(63, 6)
|
||||
#define IRDMA_CQPSQ_SRQ_PD_ID GENMASK_ULL(39, 16)
|
||||
#define IRDMA_CQPSQ_SRQ_SRQ_ID GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQPSQ_SRQ_OP GENMASK_ULL(37, 32)
|
||||
#define IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE GENMASK_ULL(45, 44)
|
||||
#define IRDMA_CQPSQ_SRQ_VIRTMAP BIT_ULL(47)
|
||||
#define IRDMA_CQPSQ_SRQ_TPH_EN BIT_ULL(60)
|
||||
#define IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT BIT_ULL(61)
|
||||
#define IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX GENMASK_ULL(27, 0)
|
||||
#define IRDMA_CQPSQ_SRQ_TPH_VALUE GENMASK_ULL(7, 0)
|
||||
#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S 8
|
||||
#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR GENMASK_ULL(63, 8)
|
||||
#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S 6
|
||||
#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR GENMASK_ULL(63, 6)
|
||||
|
||||
#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
|
||||
#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
|
||||
#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
|
||||
|
||||
#define IRDMA_CQPSQ_CQ_CQID_HIGH GENMASK_ULL(52, 50)
|
||||
#define IRDMA_CQPSQ_CQ_CEQID_HIGH GENMASK_ULL(59, 54)
|
||||
#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
|
||||
#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
|
||||
#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
|
||||
@@ -590,6 +584,7 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
|
||||
#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
|
||||
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
|
||||
#define IRDMA_CQPSQ_STAG_PDID_HI GENMASK_ULL(59, 54)
|
||||
|
||||
#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
|
||||
#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
|
||||
@@ -600,7 +595,8 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
|
||||
|
||||
#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
|
||||
#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
|
||||
#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN BIT_ULL(61)
|
||||
|
||||
#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
|
||||
#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
|
||||
@@ -628,11 +624,8 @@ enum irdma_cqp_op_type {
|
||||
/* Manage Push Page - MPP */
|
||||
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
|
||||
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
|
||||
|
||||
#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
|
||||
#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
|
||||
#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(31, 0)
|
||||
#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
|
||||
|
||||
#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
|
||||
|
||||
/* Upload Context - UCTX */
|
||||
@@ -651,6 +644,8 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
|
||||
#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
|
||||
|
||||
#define IRDMA_CQPSQ_CEQ_CEQID_HIGH GENMASK_ULL(15, 10)
|
||||
|
||||
#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
|
||||
#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
|
||||
#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
|
||||
@@ -660,10 +655,10 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
|
||||
#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
|
||||
|
||||
#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
|
||||
|
||||
#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
|
||||
#define IRDMA_COMMIT_FPM_BASE_S 32
|
||||
#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
|
||||
#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(15, 0)
|
||||
|
||||
#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
|
||||
#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
|
||||
@@ -675,6 +670,10 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
|
||||
#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
|
||||
#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
|
||||
#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID BIT_ULL(42)
|
||||
#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX GENMASK_ULL(49, 32)
|
||||
#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID BIT_ULL(43)
|
||||
#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX GENMASK_ULL(46, 32)
|
||||
#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
|
||||
#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
|
||||
#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
|
||||
@@ -693,9 +692,12 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
|
||||
#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
|
||||
#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
|
||||
#define IRDMA_MANAGE_RSRC_VER2 BIT_ULL(2)
|
||||
|
||||
#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
|
||||
#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
|
||||
#define IRDMA_CQPSQ_MIN_DEF_CMPL 0x0006
|
||||
#define IRDMA_CQPSQ_MIN_OOO_CMPL 0x0007
|
||||
|
||||
#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
|
||||
#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
|
||||
@@ -712,6 +714,11 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
|
||||
#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
|
||||
|
||||
#define IRDMAQPC_USE_SRQ BIT_ULL(10)
|
||||
#define IRDMAQPC_SRQ_ID GENMASK_ULL(15, 0)
|
||||
#define IRDMAQPC_PASID GENMASK_ULL(19, 0)
|
||||
#define IRDMAQPC_PASID_VALID BIT_ULL(11)
|
||||
|
||||
#define IRDMAQPC_ECN_EN BIT_ULL(14)
|
||||
#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
|
||||
#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
|
||||
@@ -782,21 +789,31 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
|
||||
#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
|
||||
#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
|
||||
#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32)
|
||||
#define IRDMAQPC_MINRNR_TIMER GENMASK_ULL(4, 0)
|
||||
#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(46, 32)
|
||||
#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
|
||||
#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
|
||||
#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
|
||||
#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
|
||||
#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
|
||||
#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
|
||||
#define IRDMAQPC_TXCQNUM GENMASK_ULL(24, 0)
|
||||
#define IRDMAQPC_RXCQNUM GENMASK_ULL(56, 32)
|
||||
#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
|
||||
#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
|
||||
#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
|
||||
#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
|
||||
#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
|
||||
|
||||
#define IRDMAQPC_LOCALACKTIMEOUT GENMASK_ULL(12, 8)
|
||||
#define IRDMAQPC_RNRNAK_TMR GENMASK_ULL(4, 0)
|
||||
#define IRDMAQPC_ORDSIZE_GEN3 GENMASK_ULL(10, 0)
|
||||
#define IRDMAQPC_REMOTE_ATOMIC_EN BIT_ULL(18)
|
||||
#define IRDMAQPC_STAT_INDEX_GEN3 GENMASK_ULL(47, 32)
|
||||
#define IRDMAQPC_PKT_LIMIT GENMASK_ULL(55, 48)
|
||||
|
||||
#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
|
||||
|
||||
#define IRDMAQPC_IRDSIZE_GEN3 GENMASK_ULL(17, 14)
|
||||
|
||||
#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
|
||||
#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
|
||||
#define IRDMAQPC_RDOK BIT_ULL(21)
|
||||
@@ -833,6 +850,7 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
|
||||
#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
|
||||
#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
|
||||
#define IRDMA_FEATURE_RSRC_MAX GENMASK_ULL(31, 0)
|
||||
|
||||
#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
|
||||
#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
|
||||
@@ -856,7 +874,7 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
|
||||
#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
|
||||
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
|
||||
#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
|
||||
#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
|
||||
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
|
||||
|
||||
#define IRDMA_INLINE_VALID_S 7
|
||||
@@ -869,6 +887,9 @@ enum irdma_cqp_op_type {
|
||||
|
||||
#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
|
||||
|
||||
#define IRDMAQPSQ_STAG GENMASK_ULL(31, 0)
|
||||
#define IRDMAQPSQ_REMOTE_STAG GENMASK_ULL(31, 0)
|
||||
|
||||
#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
|
||||
#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
|
||||
#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
|
||||
@@ -879,6 +900,8 @@ enum irdma_cqp_op_type {
|
||||
|
||||
#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
|
||||
|
||||
#define IRDMAQPSQ_REMOTE_ATOMICS_EN BIT_ULL(55)
|
||||
|
||||
#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
|
||||
|
||||
#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
|
||||
@@ -903,11 +926,14 @@ enum irdma_cqp_op_type {
|
||||
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
|
||||
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
|
||||
|
||||
#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
|
||||
#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
|
||||
#define IRDMA_QUERY_FPM_LOC_MEM_PAGES GENMASK_ULL(63, 32)
|
||||
#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(31, 0)
|
||||
#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(31, 0)
|
||||
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
|
||||
#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
|
||||
#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(44, 32)
|
||||
#define IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3 GENMASK_ULL(47, 32)
|
||||
#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
|
||||
#define IRDMA_QUERY_FPM_MAX_IRD GENMASK_ULL(53, 50)
|
||||
#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
|
||||
#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
|
||||
#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
|
||||
@@ -1103,7 +1129,7 @@ enum irdma_alignment {
|
||||
IRDMA_CEQ_ALIGNMENT = 0x100,
|
||||
IRDMA_CQ0_ALIGNMENT = 0x100,
|
||||
IRDMA_SD_BUF_ALIGNMENT = 0x80,
|
||||
IRDMA_FEATURE_BUF_ALIGNMENT = 0x8,
|
||||
IRDMA_FEATURE_BUF_ALIGNMENT = 0x10,
|
||||
};
|
||||
|
||||
enum icrdma_protocol_used {
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include "defs.h"
|
||||
#include "type.h"
|
||||
#include "protos.h"
|
||||
#include "virtchnl.h"
|
||||
|
||||
/**
|
||||
* irdma_find_sd_index_limit - finds segment descriptor index limit
|
||||
@@ -228,6 +229,10 @@ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
|
||||
bool pd_error = false;
|
||||
int ret_code = 0;
|
||||
|
||||
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
|
||||
dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
|
||||
return 0;
|
||||
|
||||
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -330,7 +335,7 @@ static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
|
||||
u32 i, sd_idx;
|
||||
struct irdma_dma_mem *mem;
|
||||
|
||||
if (!reset)
|
||||
if (dev->privileged && !reset)
|
||||
ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
|
||||
info->hmc_info->sd_indexes[0],
|
||||
info->del_sd_cnt, false);
|
||||
@@ -376,6 +381,9 @@ int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
|
||||
u32 i, j;
|
||||
int ret_code = 0;
|
||||
|
||||
if (dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
|
||||
return 0;
|
||||
|
||||
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
||||
ibdev_dbg(to_ibdev(dev),
|
||||
"HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
|
||||
@@ -589,7 +597,10 @@ int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
|
||||
pd_entry->sd_index = sd_idx;
|
||||
pd_entry->valid = true;
|
||||
pd_table->use_cnt++;
|
||||
irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
|
||||
|
||||
if (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id &&
|
||||
dev->privileged)
|
||||
irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
|
||||
}
|
||||
pd_entry->bp.use_cnt++;
|
||||
|
||||
@@ -640,7 +651,8 @@ int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
|
||||
pd_addr = pd_table->pd_page_addr.va;
|
||||
pd_addr += rel_pd_idx;
|
||||
memset(pd_addr, 0, sizeof(u64));
|
||||
irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
|
||||
if (dev->privileged && dev->hmc_fn_id == hmc_info->hmc_fn_id)
|
||||
irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
|
||||
|
||||
if (!pd_entry->rsrc_pg) {
|
||||
mem = &pd_entry->bp.addr;
|
||||
|
||||
@@ -16,11 +16,21 @@
|
||||
#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
|
||||
#define IRDMA_FIRST_VF_FPM_ID 8
|
||||
#define FPM_MULTIPLIER 1024
|
||||
#define IRDMA_OBJ_LOC_MEM_BIT 0x4
|
||||
#define IRDMA_XF_MULTIPLIER 16
|
||||
#define IRDMA_RRF_MULTIPLIER 8
|
||||
#define IRDMA_MIN_PBLE_PAGES 3
|
||||
#define IRDMA_HMC_PAGE_SIZE 2097152
|
||||
#define IRDMA_MIN_MR_PER_QP 4
|
||||
#define IRDMA_MIN_QP_CNT 64
|
||||
#define IRDMA_FSIAV_CNT_MAX 1048576
|
||||
#define IRDMA_MIN_IRD 8
|
||||
#define IRDMA_HMC_MIN_RRF 16
|
||||
|
||||
enum irdma_hmc_rsrc_type {
|
||||
IRDMA_HMC_IW_QP = 0,
|
||||
IRDMA_HMC_IW_CQ = 1,
|
||||
IRDMA_HMC_IW_RESERVED = 2,
|
||||
IRDMA_HMC_IW_SRQ = 2,
|
||||
IRDMA_HMC_IW_HTE = 3,
|
||||
IRDMA_HMC_IW_ARP = 4,
|
||||
IRDMA_HMC_IW_APBVT_ENTRY = 5,
|
||||
@@ -48,11 +58,17 @@ enum irdma_sd_entry_type {
|
||||
IRDMA_SD_TYPE_DIRECT = 2,
|
||||
};
|
||||
|
||||
enum irdma_hmc_obj_mem {
|
||||
IRDMA_HOST_MEM = 0,
|
||||
IRDMA_LOC_MEM = 1,
|
||||
};
|
||||
|
||||
struct irdma_hmc_obj_info {
|
||||
u64 base;
|
||||
u32 max_cnt;
|
||||
u32 cnt;
|
||||
u64 size;
|
||||
enum irdma_hmc_obj_mem mem_loc;
|
||||
};
|
||||
|
||||
struct irdma_hmc_bp {
|
||||
@@ -117,6 +133,7 @@ struct irdma_update_sds_info {
|
||||
struct irdma_ccq_cqe_info;
|
||||
struct irdma_hmc_fcn_info {
|
||||
u32 vf_id;
|
||||
u8 protocol_used;
|
||||
u8 free_fcn;
|
||||
};
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@ static struct irdma_rsrc_limits rsrc_limits_table[] = {
|
||||
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
|
||||
IRDMA_HMC_IW_QP,
|
||||
IRDMA_HMC_IW_CQ,
|
||||
IRDMA_HMC_IW_SRQ,
|
||||
IRDMA_HMC_IW_HTE,
|
||||
IRDMA_HMC_IW_ARP,
|
||||
IRDMA_HMC_IW_APBVT_ENTRY,
|
||||
@@ -134,75 +135,68 @@ static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
|
||||
static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
|
||||
struct irdma_aeqe_info *info)
|
||||
{
|
||||
struct qp_err_code qp_err;
|
||||
|
||||
qp->sq_flush_code = info->sq;
|
||||
qp->rq_flush_code = info->rq;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
|
||||
if (info->sq) {
|
||||
qp->err_sq_idx_valid = true;
|
||||
qp->err_sq_idx = info->wqe_idx;
|
||||
}
|
||||
if (info->rq) {
|
||||
qp->err_rq_idx_valid = true;
|
||||
qp->err_rq_idx = info->wqe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
switch (info->ae_id) {
|
||||
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
|
||||
case IRDMA_AE_AMP_INVALID_STAG:
|
||||
case IRDMA_AE_AMP_RIGHTS_VIOLATION:
|
||||
case IRDMA_AE_AMP_UNALLOCATED_STAG:
|
||||
case IRDMA_AE_AMP_BAD_PD:
|
||||
case IRDMA_AE_AMP_BAD_QP:
|
||||
case IRDMA_AE_AMP_BAD_STAG_KEY:
|
||||
case IRDMA_AE_AMP_BAD_STAG_INDEX:
|
||||
case IRDMA_AE_AMP_TO_WRAP:
|
||||
case IRDMA_AE_PRIV_OPERATION_DENIED:
|
||||
qp->flush_code = FLUSH_PROT_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
|
||||
break;
|
||||
case IRDMA_AE_UDA_XMIT_BAD_PD:
|
||||
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
|
||||
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
|
||||
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
|
||||
case IRDMA_AE_UDA_L4LEN_INVALID:
|
||||
case IRDMA_AE_DDP_UBE_INVALID_MO:
|
||||
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
|
||||
qp->flush_code = FLUSH_LOC_LEN_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
|
||||
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
|
||||
qp->flush_code = FLUSH_REM_ACCESS_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
|
||||
break;
|
||||
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
|
||||
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
|
||||
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
|
||||
case IRDMA_AE_IB_REMOTE_OP_ERROR:
|
||||
qp->flush_code = FLUSH_REM_OP_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_LCE_QP_CATASTROPHIC:
|
||||
qp->flush_code = FLUSH_FATAL_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
|
||||
qp->flush_code = FLUSH_GENERAL_ERR;
|
||||
break;
|
||||
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
|
||||
qp->flush_code = FLUSH_RETRY_EXC_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
|
||||
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
|
||||
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
|
||||
case IRDMA_AE_AMP_MWBIND_VALID_STAG:
|
||||
qp->flush_code = FLUSH_MW_BIND_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
|
||||
break;
|
||||
case IRDMA_AE_IB_INVALID_REQUEST:
|
||||
qp->flush_code = FLUSH_REM_INV_REQ_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
|
||||
break;
|
||||
default:
|
||||
qp->flush_code = FLUSH_GENERAL_ERR;
|
||||
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
qp_err = irdma_ae_to_qp_err_code(info->ae_id);
|
||||
qp->flush_code = qp_err.flush_code;
|
||||
qp->event_type = qp_err.event_type;
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_complete_cqp_request - perform post-completion cleanup
|
||||
* @cqp: device CQP
|
||||
* @cqp_request: CQP request
|
||||
*
|
||||
* Mark CQP request as done, wake up waiting thread or invoke
|
||||
* callback function and release/free CQP request.
|
||||
*/
|
||||
static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
|
||||
struct irdma_cqp_request *cqp_request)
|
||||
{
|
||||
if (cqp_request->waiting) {
|
||||
WRITE_ONCE(cqp_request->request_done, true);
|
||||
wake_up(&cqp_request->waitq);
|
||||
} else if (cqp_request->callback_fcn) {
|
||||
cqp_request->callback_fcn(cqp_request);
|
||||
}
|
||||
irdma_put_cqp_request(cqp, cqp_request);
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
|
||||
* @rf: RDMA PCI function
|
||||
* @info: AEQ entry info
|
||||
*/
|
||||
static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
|
||||
struct irdma_aeqe_info *info)
|
||||
{
|
||||
u32 sw_def_info;
|
||||
u64 scratch;
|
||||
|
||||
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
|
||||
|
||||
irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
|
||||
&scratch, &sw_def_info);
|
||||
while (scratch) {
|
||||
struct irdma_cqp_request *cqp_request =
|
||||
(struct irdma_cqp_request *)(uintptr_t)scratch;
|
||||
|
||||
irdma_complete_cqp_request(&rf->cqp, cqp_request);
|
||||
irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
|
||||
&scratch, &sw_def_info);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,6 +217,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
|
||||
struct irdma_sc_qp *qp = NULL;
|
||||
struct irdma_qp_host_ctx_info *ctx_info = NULL;
|
||||
struct irdma_device *iwdev = rf->iwdev;
|
||||
struct irdma_sc_srq *srq;
|
||||
unsigned long flags;
|
||||
|
||||
u32 aeqcnt = 0;
|
||||
@@ -236,6 +231,13 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (info->aeqe_overflow) {
|
||||
ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
|
||||
rf->reset = true;
|
||||
rf->gen_ops.request_reset(rf);
|
||||
return;
|
||||
}
|
||||
|
||||
aeqcnt++;
|
||||
ibdev_dbg(&iwdev->ibdev,
|
||||
"AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
|
||||
@@ -266,9 +268,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
|
||||
if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
|
||||
iwqp->last_aeq = info->ae_id;
|
||||
spin_unlock_irqrestore(&iwqp->lock, flags);
|
||||
ctx_info = &iwqp->ctx_info;
|
||||
} else if (info->srq) {
|
||||
if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
|
||||
continue;
|
||||
} else {
|
||||
if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
|
||||
if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
|
||||
info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -363,6 +368,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
|
||||
}
|
||||
irdma_cq_rem_ref(&iwcq->ibcq);
|
||||
break;
|
||||
case IRDMA_AE_SRQ_LIMIT:
|
||||
srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
|
||||
irdma_srq_event(srq);
|
||||
break;
|
||||
case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
|
||||
break;
|
||||
case IRDMA_AE_CQP_DEFERRED_COMPLETE:
|
||||
/* Remove completed CQP requests from pending list
|
||||
* and notify about those CQP ops completion.
|
||||
*/
|
||||
irdma_process_ae_def_cmpl(rf, info);
|
||||
break;
|
||||
case IRDMA_AE_RESET_NOT_SENT:
|
||||
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
|
||||
case IRDMA_AE_RESOURCE_EXHAUSTION:
|
||||
@@ -389,13 +406,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
|
||||
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
|
||||
case IRDMA_AE_LLP_TOO_MANY_RNRS:
|
||||
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
|
||||
case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
|
||||
case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
ctx_info->roce_info->err_rq_idx_valid = info->rq;
if (info->rq) {
ctx_info = &iwqp->ctx_info;
if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
ctx_info->roce_info->err_rq_idx_valid =
ctx_info->srq_valid ? false : info->err_rq_idx_valid;
if (ctx_info->roce_info->err_rq_idx_valid) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -599,6 +621,8 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
cqp->sq.pa);
cqp->sq.va = NULL;
kfree(cqp->oop_op_array);
cqp->oop_op_array = NULL;
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
kfree(cqp->cqp_requests);
@@ -631,7 +655,9 @@ static void irdma_destroy_aeq(struct irdma_pci_f *rf)
int status = -EBUSY;

if (!rf->msix_shared) {
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
if (rf->sc_dev.privileged)
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
rf->iw_msixtbl->idx, false);
irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
}
if (rf->reset)
@@ -697,9 +723,10 @@ static void irdma_del_ceq_0(struct irdma_pci_f *rf)

if (rf->msix_shared) {
msix_vec = &rf->iw_msixtbl[0];
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
msix_vec->ceq_id,
msix_vec->idx, false);
if (rf->sc_dev.privileged)
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
msix_vec->ceq_id,
msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, rf);
} else {
msix_vec = &rf->iw_msixtbl[1];
@@ -730,8 +757,10 @@ static void irdma_del_ceqs(struct irdma_pci_f *rf)
msix_vec = &rf->iw_msixtbl[2];

for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
msix_vec->idx, false);
if (rf->sc_dev.privileged)
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
msix_vec->ceq_id,
msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, iwceq);
irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
IRDMA_OP_CEQ_DESTROY);
@@ -942,6 +971,13 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
goto err_scratch;
}

cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array),
GFP_KERNEL);
if (!cqp->oop_op_array) {
status = -ENOMEM;
goto err_oop;
}
cqp_init_info.ooo_op_array = cqp->oop_op_array;
dev->cqp = &cqp->sc_cqp;
dev->cqp->dev = dev;
cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
@@ -978,6 +1014,10 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
case IRDMA_GEN_2:
cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
break;
case IRDMA_GEN_3:
cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
cqp_init_info.ts_override = 1;
break;
}
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
@@ -1012,6 +1052,9 @@ err_ctx:
cqp->sq.va, cqp->sq.pa);
cqp->sq.va = NULL;
err_sq:
kfree(cqp->oop_op_array);
cqp->oop_op_array = NULL;
err_oop:
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
err_scratch:
@@ -1033,13 +1076,15 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_ccq_init_info info = {};
struct irdma_ccq *ccq = &rf->ccq;
int ccq_size;
int status;

dev->ccq = &ccq->sc_cq;
dev->ccq->dev = dev;
info.dev = dev;
ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
IRDMA_CQ0_ALIGNMENT);
ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
&ccq->mem_cq.pa, GFP_KERNEL);
@@ -1056,7 +1101,7 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
/* populate the ccq init info */
info.cq_base = ccq->mem_cq.va;
info.cq_pa = ccq->mem_cq.pa;
info.num_elem = IW_CCQ_SIZE;
info.num_elem = ccq_size;
info.shadow_area = ccq->shadow_area.va;
info.shadow_area_pa = ccq->shadow_area.pa;
info.ceqe_mask = false;
@@ -1140,9 +1185,13 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
}

msix_vec->ceq_id = ceq_id;
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);

return 0;
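/* Privileged functions program the CEQ vector directly; others map it via a virtual channel request */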
if (rf->sc_dev.privileged)
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
msix_vec->idx, true);
else
status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
msix_vec->idx);
return status;
}

/**
@@ -1155,7 +1204,7 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
u32 ret = 0;
int ret = 0;

if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
@@ -1166,12 +1215,16 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
}
if (ret) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
return -EINVAL;
return ret;
}

rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
if (rf->sc_dev.privileged)
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
true);
else
ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);

return 0;
return ret;
}

/**
@@ -1179,13 +1232,13 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
* @rf: RDMA PCI function
* @iwceq: pointer to the ceq resources to be created
* @ceq_id: the id number of the iwceq
* @vsi: SC vsi struct
* @vsi_idx: vsi idx
*
* Return 0, if the ceq and the resources associated with it
* are successfully created, otherwise return error
*/
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
u32 ceq_id, struct irdma_sc_vsi *vsi)
u32 ceq_id, u16 vsi_idx)
{
int status;
struct irdma_ceq_init_info info = {};
@@ -1209,7 +1262,7 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
info.elem_cnt = ceq_size;
iwceq->sc_ceq.ceq_id = ceq_id;
info.dev = dev;
info.vsi = vsi;
info.vsi_idx = vsi_idx;
status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
if (!status) {
if (dev->ceq_valid)
@@ -1252,7 +1305,7 @@ static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
}

iwceq = &rf->ceqlist[0];
status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
status);
@@ -1287,13 +1340,13 @@ exit:
/**
* irdma_setup_ceqs - manage the device ceq's and their interrupt resources
* @rf: RDMA PCI function
* @vsi: VSI structure for this CEQ
* @vsi_idx: vsi_idx for this CEQ
*
* Allocate a list for all device completion event queues
* Create the ceq's and configure their msix interrupt vectors
* Return 0, if ceqs are successfully set up, otherwise return error
*/
static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
{
u32 i;
u32 ceq_id;
@@ -1306,7 +1359,7 @@ static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
i = (rf->msix_shared) ? 1 : 2;
for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
iwceq = &rf->ceqlist[ceq_id];
status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev,
"ERR: create ceq status = %d\n", status);
@@ -1387,7 +1440,10 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);

/* GEN_3 does not support virtual AEQ. Cap at max Kernel alloc size */
if (rf->rdma_ver == IRDMA_GEN_3)
aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
sizeof(struct irdma_sc_aeqe)));
aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
IRDMA_AEQ_ALIGNMENT);
aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
@@ -1395,6 +1451,8 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
GFP_KERNEL | __GFP_NOWARN);
if (aeq->mem.va)
goto skip_virt_aeq;
else if (rf->rdma_ver == IRDMA_GEN_3)
return -ENOMEM;

/* physically mapped aeq failed. setup virtual aeq */
status = irdma_create_virt_aeq(rf, aeq_size);
@@ -1569,6 +1627,8 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;

if (!rf->sc_dev.privileged)
irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
kfree(dev->hmc_info->sd_table.sd_entry);
dev->hmc_info->sd_table.sd_entry = NULL;
vfree(rf->mem_rsrc);
@@ -1635,6 +1695,7 @@ static int irdma_initialize_dev(struct irdma_pci_f *rf)

info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->pf_id;
info.protocol_used = rf->protocol_used;
info.hw = &rf->hw;
status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
if (status)
@@ -1665,9 +1726,6 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
irdma_del_local_mac_entry(iwdev->rf,
(u8)iwdev->mac_ip_table_idx);
fallthrough;
case AEQ_CREATED:
case PBLE_CHUNK_MEM:
case CEQS_CREATED:
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
@@ -1740,7 +1798,9 @@ static void irdma_get_used_rsrc(struct irdma_device *iwdev)
iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
iwdev->rf->max_cq);
iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
iwdev->rf->max_srq);
iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr);
}
@@ -1750,13 +1810,17 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
enum init_completion_state state = rf->init_state;

rf->init_state = INVALID_STATE;
if (rf->rsrc_created) {
irdma_destroy_aeq(rf);
irdma_destroy_pble_prm(rf->pble_rsrc);
irdma_del_ceqs(rf);
rf->rsrc_created = false;
}

switch (state) {
case AEQ_CREATED:
irdma_destroy_aeq(rf);
fallthrough;
case PBLE_CHUNK_MEM:
irdma_destroy_pble_prm(rf->pble_rsrc);
fallthrough;
case CEQS_CREATED:
irdma_del_ceqs(rf);
fallthrough;
case CEQ0_CREATED:
irdma_del_ceq_0(rf);
fallthrough;
@@ -1835,32 +1899,6 @@ int irdma_rt_init_hw(struct irdma_device *iwdev,
break;
iwdev->init_state = IEQ_CREATED;
}
if (!rf->rsrc_created) {
status = irdma_setup_ceqs(rf, &iwdev->vsi);
if (status)
break;

iwdev->init_state = CEQS_CREATED;

status = irdma_hmc_init_pble(&rf->sc_dev,
rf->pble_rsrc);
if (status) {
irdma_del_ceqs(rf);
break;
}

iwdev->init_state = PBLE_CHUNK_MEM;

status = irdma_setup_aeq(rf);
if (status) {
irdma_destroy_pble_prm(rf->pble_rsrc);
irdma_del_ceqs(rf);
break;
}
iwdev->init_state = AEQ_CREATED;
rf->rsrc_created = true;
}

if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
@@ -1907,6 +1945,13 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CQP_CREATED;

dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
if (rf->rdma_ver != IRDMA_GEN_1) {
status = irdma_get_rdma_features(dev);
if (status)
break;
}

status = irdma_hmc_setup(rf);
if (status)
break;
@@ -1922,13 +1967,6 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CCQ_CREATED;

dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
if (rf->rdma_ver != IRDMA_GEN_1) {
status = irdma_get_rdma_features(dev);
if (status)
break;
}

status = irdma_setup_ceq_0(rf);
if (status)
break;
@@ -1942,6 +1980,25 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
}
INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
irdma_sc_ccq_arm(dev->ccq);

status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
if (status)
break;

rf->init_state = CEQS_CREATED;

status = irdma_hmc_init_pble(&rf->sc_dev,
rf->pble_rsrc);
if (status)
break;

rf->init_state = PBLE_CHUNK_MEM;

status = irdma_setup_aeq(rf);
if (status)
break;
rf->init_state = AEQ_CREATED;

return 0;
} while (0);

@@ -1960,7 +2017,8 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
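/* Resource bitmaps are carved sequentially out of the single mem_rsrc allocation sized by irdma_calc_mem_rsrc_size() */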
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
@@ -1988,12 +2046,14 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;

return rsrc_size;
}
@@ -2021,6 +2081,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
@@ -2040,6 +2101,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
set_bit(0, rf->allocated_cqs);
set_bit(0, rf->allocated_srqs);
set_bit(0, rf->allocated_pds);
set_bit(0, rf->allocated_arps);
set_bit(0, rf->allocated_ahs);
@@ -2100,15 +2162,16 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;

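/* Waiters are woken here; fire-and-forget requests run their completion callback instead */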
if (cqp_request->waiting) {
WRITE_ONCE(cqp_request->request_done, true);
wake_up(&cqp_request->waitq);
irdma_put_cqp_request(&rf->cqp, cqp_request);
} else {
if (cqp_request->callback_fcn)
cqp_request->callback_fcn(cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
}
/*
* If this is deferred or pending completion, then mark
* CQP request as pending to not block the CQ, but don't
* release CQP request, as it is still on the OOO list.
*/
if (info.pending)
cqp_request->pending = true;
else
irdma_complete_cqp_request(&rf->cqp,
cqp_request);
}

cqe_count++;
@@ -2718,7 +2781,9 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
struct irdma_pci_f *rf = iwqp->iwdev->rf;
u8 flush_code = iwqp->sc_qp.flush_code;

if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
!(flush_mask & IRDMA_FLUSH_RQ)) ||
((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
return;

/* Set flush info fields*/
@@ -2731,6 +2796,10 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.rq_minor_code = FLUSH_GENERAL_ERR;
info.userflushcode = true;
info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
info.err_rq_idx = iwqp->sc_qp.err_rq_idx;

if (flush_mask & IRDMA_REFLUSH) {
if (info.sq)

@@ -85,6 +85,7 @@ static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
I40E_CQPSQ_CQ_CEQID,
I40E_CQPSQ_CQ_CQID,
I40E_COMMIT_FPM_CQCNT,
I40E_CQPSQ_UPESD_HMCFNID,
};

static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
@@ -94,6 +95,7 @@ static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
I40E_CQPSQ_CQ_CEQID_S,
I40E_CQPSQ_CQ_CQID_S,
I40E_COMMIT_FPM_CQCNT_S,
I40E_CQPSQ_UPESD_HMCFNID_S,
};

/**

@@ -123,6 +123,8 @@
#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
#define I40E_COMMIT_FPM_CQCNT_S 0
#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
#define I40E_CQPSQ_UPESD_HMCFNID_S 0
#define I40E_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)

#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))


@@ -75,6 +75,9 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info
struct irdma_pci_f *rf = iwdev->rf;

rf->rdma_ver = IRDMA_GEN_1;
rf->sc_dev.hw = &rf->hw;
rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_1;
rf->sc_dev.privileged = true;
rf->gen_ops.request_reset = i40iw_request_reset;
rf->pcidev = cdev_info->pcidev;
rf->pf_id = cdev_info->fid;

@@ -38,6 +38,7 @@ static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
ICRDMA_CQPSQ_UPESD_HMCFNID,
};

static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
@@ -47,6 +48,7 @@ static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};

/**
@@ -194,6 +196,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;

dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;

@@ -58,14 +58,15 @@
#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)

#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_STATS_COUNT = 128,

ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,

ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
};

void icrdma_init_hw(struct irdma_sc_dev *dev);

drivers/infiniband/hw/irdma/icrdma_if.c (new file, 343 lines)
@@ -0,0 +1,343 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2024 Intel Corporation */

#include "main.h"
#include <linux/net/intel/iidc_rdma_ice.h>

static void icrdma_prep_tc_change(struct irdma_device *iwdev)
{
iwdev->vsi.tc_change_pending = true;
irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);

/* Wait for all qp's to suspend */
wait_event_timeout(iwdev->suspend_wq,
!atomic_read(&iwdev->vsi.qp_suspend_reqs),
msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
irdma_ws_reset(&iwdev->vsi);
}

static void icrdma_fill_qos_info(struct irdma_l2params *l2params,
struct iidc_rdma_qos_params *qos_info)
{
int i;

l2params->num_tc = qos_info->num_tc;
l2params->vsi_prio_type = qos_info->vport_priority_type;
l2params->vsi_rel_bw = qos_info->vport_relative_bw;
for (i = 0; i < l2params->num_tc; i++) {
l2params->tc_info[i].egress_virt_up =
qos_info->tc_info[i].egress_virt_up;
l2params->tc_info[i].ingress_virt_up =
qos_info->tc_info[i].ingress_virt_up;
l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
}
for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
l2params->up2tc[i] = qos_info->up2tc[i];
if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
l2params->dscp_mode = true;
memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
}
}

static void icrdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
struct iidc_rdma_event *event)
{
struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
struct irdma_l2params l2params = {};

if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
l2params.mtu = iwdev->netdev->mtu;
l2params.mtu_changed = true;
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
} else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
if (iwdev->vsi.tc_change_pending)
return;

icrdma_prep_tc_change(iwdev);
} else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;

if (!iwdev->vsi.tc_change_pending)
return;

l2params.tc_changed = true;
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");

icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode =
l2params.num_tc > 1 && !l2params.dscp_mode;
irdma_change_l2params(&iwdev->vsi, &l2params);
} else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
event->reg);
if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
u32 pe_criterr;

pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
pe_criterr);
iwdev->rf->reset = true;
} else {
ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
}
}
if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
ibdev_err(&iwdev->ibdev, "HMC Error\n");
iwdev->rf->reset = true;
}
if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
ibdev_err(&iwdev->ibdev, "PE Push Error\n");
iwdev->rf->reset = true;
}
if (iwdev->rf->reset)
iwdev->rf->gen_ops.request_reset(iwdev->rf);
}
}

/**
* icrdma_lan_register_qset - Register qset with LAN driver
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
static int icrdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
struct iidc_rdma_qset_params qset = {};
int ret;

qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
ret = ice_add_rdma_qset(cdev_info, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return ret;
}

tc_node->l2_sched_node_id = qset.teid;
vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;

return 0;
}

/**
* icrdma_lan_unregister_qset - Unregister qset with LAN driver
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
static void icrdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
struct iidc_rdma_qset_params qset = {};

qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
qset.teid = tc_node->l2_sched_node_id;

if (ice_del_rdma_qset(cdev_info, &qset))
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}

/**
* icrdma_request_reset - Request a reset
* @rf: RDMA PCI function
*/
static void icrdma_request_reset(struct irdma_pci_f *rf)
{
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
}

static int icrdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;

rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
GFP_KERNEL);
if (!rf->msix_entries)
return -ENOMEM;

for (i = 0; i < rf->msix_count; i++)
if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
break;

if (i < IRDMA_MIN_MSIX) {
while (--i >= 0)
ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);

kfree(rf->msix_entries);
return -ENOMEM;
}

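/* Keep only the vectors that were actually allocated */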
rf->msix_count = i;

return 0;
}

static void icrdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;

for (i = 0; i < rf->msix_count; i++)
ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);

kfree(rf->msix_entries);
}

static void icrdma_fill_device_info(struct irdma_device *iwdev,
struct iidc_rdma_core_dev_info *cdev_info)
{
struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
struct irdma_pci_f *rf = iwdev->rf;

rf->sc_dev.hw = &rf->hw;
rf->iwdev = iwdev;
rf->cdev = cdev_info;
rf->hw.hw_addr = idc_priv->hw_addr;
rf->pcidev = cdev_info->pdev;
rf->hw.device = &rf->pcidev->dev;
rf->pf_id = idc_priv->pf_id;
rf->rdma_ver = IRDMA_GEN_2;
rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
rf->sc_dev.is_pf = true;
rf->sc_dev.privileged = true;

rf->gen_ops.register_qset = icrdma_lan_register_qset;
rf->gen_ops.unregister_qset = icrdma_lan_unregister_qset;

rf->default_vsi.vsi_idx = idc_priv->vport_id;
rf->protocol_used =
cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = icrdma_request_reset;
rf->limits_sel = 7;
mutex_init(&rf->ah_tbl_lock);

iwdev->netdev = idc_priv->netdev;
iwdev->vsi_num = idc_priv->vport_id;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->roce_mode = true;
}

static int icrdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
struct iidc_rdma_core_auxiliary_dev *iidc_adev;
struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_priv_dev_info *idc_priv;
struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
int err;

iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
cdev_info = iidc_adev->cdev_info;
idc_priv = cdev_info->iidc_priv;

iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
if (!iwdev->rf) {
ib_dealloc_device(&iwdev->ibdev);
return -ENOMEM;
}

icrdma_fill_device_info(iwdev, cdev_info);
rf = iwdev->rf;

err = icrdma_init_interrupts(rf, cdev_info);
if (err)
goto err_init_interrupts;

err = irdma_ctrl_init_hw(rf);
if (err)
goto err_ctrl_init;

l2params.mtu = iwdev->netdev->mtu;
icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;

err = irdma_rt_init_hw(iwdev, &l2params);
if (err)
goto err_rt_init;

err = irdma_ib_register_device(iwdev);
if (err)
goto err_ibreg;

ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);

ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);

return 0;

err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
icrdma_deinit_interrupts(rf, cdev_info);
err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);

return err;
}

static void icrdma_remove(struct auxiliary_device *aux_dev)
{
struct iidc_rdma_core_auxiliary_dev *idc_adev =
container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
u8 rdma_ver = iwdev->rf->rdma_ver;

ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
irdma_ib_unregister_device(iwdev);
icrdma_deinit_interrupts(iwdev->rf, cdev_info);

pr_debug("INIT: Gen[%d] func[%d] device remove success\n",
rdma_ver, PCI_FUNC(cdev_info->pdev->devfn));
}

static const struct auxiliary_device_id icrdma_auxiliary_id_table[] = {
{.name = "ice.iwarp", },
{.name = "ice.roce", },
{},
};

MODULE_DEVICE_TABLE(auxiliary, icrdma_auxiliary_id_table);

struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv = {
.adrv = {
.name = "gen_2",
.id_table = icrdma_auxiliary_id_table,
.probe = icrdma_probe,
.remove = icrdma_remove,
},
.event_handler = icrdma_iidc_event_handler,
};
drivers/infiniband/hw/irdma/ig3rdma_hw.c (new file, 170 lines)
@@ -0,0 +1,170 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2018 - 2024 Intel Corporation */
#include "osdep.h"
#include "type.h"
#include "protos.h"
#include "ig3rdma_hw.h"

/**
* ig3rdma_ena_irq - Enable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void ig3rdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
u32 val;
u32 int_stride = 1; /* one u32 per register */

if (dev->is_pf)
int_stride = 0x400;
else
idx--; /* VFs use DYN_CTL_N */

val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);

writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
}

/**
* ig3rdma_disable_irq - Disable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void ig3rdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
u32 int_stride = 1; /* one u32 per register */

if (dev->is_pf)
int_stride = 0x400;
else
idx--; /* VFs use DYN_CTL_N */

writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
}

static const struct irdma_irq_ops ig3rdma_irq_ops = {
.irdma_dis_irq = ig3rdma_disable_irq,
.irdma_en_irq = ig3rdma_ena_irq,
};

static const struct irdma_hw_stat_map ig3rdma_hw_stat_map[] = {
[IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 48, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 56, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 64, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 72, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 80, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 88, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 96, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 104, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 112, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 120, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 128, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 136, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 144, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 152, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 160, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 168, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 176, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 184, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 192, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 200, 0, 0 },
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 208, 0, 0 },
[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, 0 },
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 224, 0, 0 },
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 232, 0, 0 },
[IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 240, 0, 0 },
[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 248, 0, 0 },
[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 256, 0, 0 },
[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 264, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 272, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 280, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 288, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 296, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 304, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 312, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMAVBND] = { 320, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMAVINV] = { 328, 0, 0 },
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 336, 0, 0 },
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 344, 0, 0 },
[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 352, 0, 0 },
[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 360, 0, 0 },
[IRDMA_HW_STAT_INDEX_RNR_SENT] = { 368, 0, 0 },
[IRDMA_HW_STAT_INDEX_RNR_RCVD] = { 376, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT] = { 384, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT] = { 392, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMARXATS] = { 408, 0, 0 },
[IRDMA_HW_STAT_INDEX_RDMATXATS] = { 416, 0, 0 },
[IRDMA_HW_STAT_INDEX_NAKSEQERR] = { 424, 0, 0 },
[IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED] = { 432, 0, 0 },
[IRDMA_HW_STAT_INDEX_RTO] = { 440, 0, 0 },
[IRDMA_HW_STAT_INDEX_RXOOOPKTS] = { 448, 0, 0 },
[IRDMA_HW_STAT_INDEX_ICRCERR] = { 456, 0, 0 },
};

void ig3rdma_init_hw(struct irdma_sc_dev *dev)
{
dev->irq_ops = &ig3rdma_irq_ops;
dev->hw_stats_map = ig3rdma_hw_stat_map;

dev->hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_3;
dev->hw_attrs.uk_attrs.max_hw_wq_frags = IG3RDMA_MAX_WQ_FRAGMENT_COUNT;
dev->hw_attrs.uk_attrs.max_hw_read_sges = IG3RDMA_MAX_SGE_RD;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
dev->hw_attrs.first_hw_vf_fpm_id = 0;
dev->hw_attrs.max_hw_vf_fpm_id = IG3_MAX_APFS + IG3_MAX_AVFS;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_64_BYTE_CQE;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_CQE_TIMESTAMPING;

dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_SRQ;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
IRDMA_FEATURE_CQ_RESIZE;
dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
dev->hw_attrs.max_hw_ird = IG3RDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = IG3RDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = IG3RDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_3;
dev->hw_attrs.uk_attrs.min_hw_wq_size = IG3RDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_srq_quanta = IRDMA_SRQ_MAX_QUANTA;
dev->hw_attrs.uk_attrs.max_hw_inline = IG3RDMA_MAX_INLINE_DATA_SIZE;
dev->hw_attrs.max_hw_device_pages =
dev->is_pf ? IG3RDMA_MAX_PF_PUSH_PAGE_COUNT : IG3RDMA_MAX_VF_PUSH_PAGE_COUNT;
}

static void __iomem *__ig3rdma_get_reg_addr(struct irdma_mmio_region *region, u64 reg_offset)
{
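/* Translate an absolute register offset into this region's mapped window */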
if (reg_offset >= region->offset &&
reg_offset < (region->offset + region->len)) {
reg_offset -= region->offset;

return region->addr + reg_offset;
}

return NULL;
}

void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset)
{
u8 __iomem *reg_addr;
int i;

reg_addr = __ig3rdma_get_reg_addr(&hw->rdma_reg, reg_offset);
if (reg_addr)
return reg_addr;

for (i = 0; i < hw->num_io_regions; i++) {
reg_addr = __ig3rdma_get_reg_addr(&hw->io_regs[i], reg_offset);
if (reg_addr)
return reg_addr;
}

WARN_ON_ONCE(1);

return NULL;
}
drivers/infiniband/hw/irdma/ig3rdma_hw.h (new file, 32 lines)
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2021 - 2024 Intel Corporation */
#ifndef IG3RDMA_HW_H
#define IG3RDMA_HW_H

#define IG3_MAX_APFS 1
#define IG3_MAX_AVFS 0

#define IG3_PF_RDMA_REGION_OFFSET 0xBC00000
#define IG3_PF_RDMA_REGION_LEN 0x401000
#define IG3_VF_RDMA_REGION_OFFSET 0x8C00
#define IG3_VF_RDMA_REGION_LEN 0x8400

enum ig3rdma_device_caps_const {
IG3RDMA_MAX_WQ_FRAGMENT_COUNT = 14,
IG3RDMA_MAX_SGE_RD = 14,

IG3RDMA_MAX_STATS_COUNT = 128,

IG3RDMA_MAX_IRD_SIZE = 64,
IG3RDMA_MAX_ORD_SIZE = 64,
IG3RDMA_MIN_WQ_SIZE = 16 /* WQEs */,
IG3RDMA_MAX_INLINE_DATA_SIZE = 216,
IG3RDMA_MAX_PF_PUSH_PAGE_COUNT = 8192,
IG3RDMA_MAX_VF_PUSH_PAGE_COUNT = 16,
};

void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
u8 *recv_msg, u16 *recv_len);

#endif /* IG3RDMA_HW_H*/
drivers/infiniband/hw/irdma/ig3rdma_if.c (new file, 232 lines)
@@ -0,0 +1,232 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2023 - 2024 Intel Corporation */

#include "main.h"
#include <linux/net/intel/iidc_rdma_idpf.h>
#include "ig3rdma_hw.h"

static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
struct iidc_rdma_event *event)
{
struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);

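/* A reset warning from the core driver invalidates the virtual channel */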
if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
rf->reset = true;
rf->sc_dev.vchnl_up = false;
}
}

int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
u8 *recv_msg, u16 *recv_len)
{
struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
int ret;

ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
recv_len);
if (ret == -ETIMEDOUT) {
ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
"Virtual channel Req <-> Resp completion timeout\n");
dev->vchnl_up = false;
}

return ret;
}

static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
struct iidc_rdma_core_dev_info *cdev_info,
u8 *rdma_ver)
{
struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
struct irdma_vchnl_init_info virt_info;
u8 gen = rf->rdma_ver;
int ret;

rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
if (!rf->vchnl_wq)
return -ENOMEM;

mutex_init(&rf->sc_dev.vchnl_mutex);

virt_info.is_pf = !idc_priv->ftype;
virt_info.hw_rev = gen;
virt_info.privileged = gen == IRDMA_GEN_2;
virt_info.vchnl_wq = rf->vchnl_wq;
ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
if (ret) {
destroy_workqueue(rf->vchnl_wq);
return ret;
}

*rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;

return 0;
}

/**
* ig3rdma_request_reset - Request a reset
* @rf: RDMA PCI function
*/
static void ig3rdma_request_reset(struct irdma_pci_f *rf)
{
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
}

static int ig3rdma_cfg_regions(struct irdma_hw *hw,
struct iidc_rdma_core_dev_info *cdev_info)
{
struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
struct pci_dev *pdev = cdev_info->pdev;
int i;

switch (idc_priv->ftype) {
case IIDC_FUNCTION_TYPE_PF:
hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
break;
case IIDC_FUNCTION_TYPE_VF:
hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
break;
default:
return -ENODEV;
}

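/* Map the function's RDMA register region out of BAR 0 */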
hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
hw->rdma_reg.len);

if (!hw->rdma_reg.addr)
return -ENOMEM;

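/* Adopt the additional MMIO regions already mapped by the core driver */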
hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
hw->io_regs = kcalloc(hw->num_io_regions,
sizeof(struct irdma_mmio_region), GFP_KERNEL);

if (!hw->io_regs) {
iounmap(hw->rdma_reg.addr);
return -ENOMEM;
}

for (i = 0; i < hw->num_io_regions; i++) {
hw->io_regs[i].addr =
idc_priv->mapped_mem_regions[i].region_addr;
hw->io_regs[i].len =
le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
hw->io_regs[i].offset =
le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
}

return 0;
}

static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
{
struct irdma_hw *hw = &rf->hw;

destroy_workqueue(rf->vchnl_wq);
kfree(hw->io_regs);
iounmap(hw->rdma_reg.addr);
}

static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
struct iidc_rdma_core_dev_info *cdev_info)
{
struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
int err;

rf->sc_dev.hw = &rf->hw;
rf->cdev = cdev_info;
rf->pcidev = cdev_info->pdev;
rf->hw.device = &rf->pcidev->dev;
rf->msix_count = idc_priv->msix_count;
rf->msix_entries = idc_priv->msix_entries;

err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
if (err)
return err;

err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
if (err) {
destroy_workqueue(rf->vchnl_wq);
return err;
}

rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = ig3rdma_request_reset;
rf->limits_sel = 7;
mutex_init(&rf->ah_tbl_lock);

return 0;
}

static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *id)
{
struct iidc_rdma_core_auxiliary_dev *idc_adev =
container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
struct irdma_pci_f *rf;
int err;

rf = kzalloc(sizeof(*rf), GFP_KERNEL);
if (!rf)
return -ENOMEM;

err = ig3rdma_cfg_rf(rf, cdev_info);
if (err)
goto err_cfg_rf;

err = irdma_ctrl_init_hw(rf);
if (err)
goto err_ctrl_init;

auxiliary_set_drvdata(aux_dev, rf);

err = idpf_idc_vport_dev_ctrl(cdev_info, true);
if (err)
goto err_vport_ctrl;

return 0;

err_vport_ctrl:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
ig3rdma_decfg_rf(rf);
err_cfg_rf:
kfree(rf);

return err;
}

static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
{
struct iidc_rdma_core_auxiliary_dev *idc_adev =
container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);

idpf_idc_vport_dev_ctrl(cdev_info, false);
irdma_ctrl_deinit_hw(rf);
ig3rdma_decfg_rf(rf);
kfree(rf);
}

static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
{.name = "idpf.8086.rdma.core", },
{},
};

MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);

struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
.adrv = {
.name = "core",
.id_table = ig3rdma_core_auxiliary_id_table,
.probe = ig3rdma_core_probe,
.remove = ig3rdma_core_remove,
},
.event_handler = ig3rdma_idc_core_event_handler,
};

@@ -32,7 +32,16 @@
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)

#define IRDMA_INVALID_CQ_IDX 0xffffffff
#define IRDMA_Q_INVALID_IDX 0xffff

enum irdma_dyn_idx_t {
IRDMA_IDX_ITR0 = 0,
IRDMA_IDX_ITR1 = 1,
IRDMA_IDX_ITR2 = 2,
IRDMA_IDX_NOITR = 3,
};

enum irdma_registers {
IRDMA_CQPTAIL,
IRDMA_CQPDB,
@@ -67,6 +76,7 @@ enum irdma_shifts {
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
IRDMA_CQPSQ_UPESD_HMCFNID_S,
IRDMA_MAX_SHIFTS,
};

@@ -77,6 +87,7 @@ enum irdma_masks {
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
IRDMA_CQPSQ_UPESD_HMCFNID_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};

@@ -92,7 +103,7 @@ struct irdma_mcast_grp_ctx_entry_info {
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
u8 hmc_fcn_id;
u16 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
@@ -107,6 +118,9 @@ enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
IRDMA_GEN_3,
IRDMA_GEN_NEXT,
IRDMA_GEN_MAX = IRDMA_GEN_NEXT-1
};

struct irdma_uk_attrs {
@@ -118,6 +132,7 @@ struct irdma_uk_attrs {
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u32 max_hw_srq_quanta;
u16 max_hw_sq_chunk;
u16 min_hw_wq_size;
u8 hw_rev;
@@ -147,10 +162,13 @@ struct irdma_hw_attrs {
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
u32 min_hw_srq_id;
u16 max_stat_inst;
u16 max_stat_idx;
};

void i40iw_init_hw(struct irdma_sc_dev *dev);
void icrdma_init_hw(struct irdma_sc_dev *dev);
void ig3rdma_init_hw(struct irdma_sc_dev *dev);
void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
#endif /* IRDMA_H*/

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
#include <linux/net/intel/iidc_rdma_idpf.h>

MODULE_ALIAS("i40iw");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
@@ -38,19 +39,7 @@ static void irdma_unregister_notifiers(void)
unregister_netdevice_notifier(&irdma_netdevice_notifier);
}

static void irdma_prep_tc_change(struct irdma_device *iwdev)
{
iwdev->vsi.tc_change_pending = true;
irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);

/* Wait for all qp's to suspend */
wait_event_timeout(iwdev->suspend_wq,
!atomic_read(&iwdev->vsi.qp_suspend_reqs),
msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
irdma_ws_reset(&iwdev->vsi);
}

static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
@@ -58,35 +47,10 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
}
|
||||
|
||||
static void irdma_fill_qos_info(struct irdma_l2params *l2params,
|
||||
struct iidc_rdma_qos_params *qos_info)
|
||||
static void ig3rdma_idc_vport_event_handler(struct iidc_rdma_vport_dev_info *cdev_info,
|
||||
struct iidc_rdma_event *event)
|
||||
{
|
||||
int i;
|
||||
|
||||
l2params->num_tc = qos_info->num_tc;
|
||||
l2params->vsi_prio_type = qos_info->vport_priority_type;
|
||||
l2params->vsi_rel_bw = qos_info->vport_relative_bw;
|
||||
for (i = 0; i < l2params->num_tc; i++) {
|
||||
l2params->tc_info[i].egress_virt_up =
|
||||
qos_info->tc_info[i].egress_virt_up;
|
||||
l2params->tc_info[i].ingress_virt_up =
|
||||
qos_info->tc_info[i].ingress_virt_up;
|
||||
l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
|
||||
l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
|
||||
l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
|
||||
}
|
||||
for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
|
||||
l2params->up2tc[i] = qos_info->up2tc[i];
|
||||
if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
|
||||
l2params->dscp_mode = true;
|
||||
memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
|
||||
}
|
||||
}
|
||||
|
||||
static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
|
||||
struct iidc_rdma_event *event)
|
||||
{
|
||||
struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
|
||||
struct irdma_device *iwdev = auxiliary_get_drvdata(cdev_info->adev);
|
||||
struct irdma_l2params l2params = {};
|
||||
|
||||
if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
|
||||
@@ -97,248 +61,39 @@ static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
|
||||
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
|
||||
irdma_change_l2params(&iwdev->vsi, &l2params);
|
||||
}
|
||||
} else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
|
||||
if (iwdev->vsi.tc_change_pending)
|
||||
return;
|
||||
|
||||
irdma_prep_tc_change(iwdev);
|
||||
} else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
|
||||
struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
|
||||
|
||||
if (!iwdev->vsi.tc_change_pending)
|
||||
return;
|
||||
|
||||
l2params.tc_changed = true;
|
||||
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
|
||||
|
||||
irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
|
||||
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
|
||||
iwdev->dcb_vlan_mode =
|
||||
l2params.num_tc > 1 && !l2params.dscp_mode;
|
||||
irdma_change_l2params(&iwdev->vsi, &l2params);
|
||||
} else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
|
||||
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
|
||||
event->reg);
|
||||
if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
|
||||
u32 pe_criterr;
|
||||
|
||||
pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
pe_criterr);
iwdev->rf->reset = true;
} else {
ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
}
}
if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
ibdev_err(&iwdev->ibdev, "HMC Error\n");
iwdev->rf->reset = true;
}
if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
ibdev_err(&iwdev->ibdev, "PE Push Error\n");
iwdev->rf->reset = true;
}
if (iwdev->rf->reset)
iwdev->rf->gen_ops.request_reset(iwdev->rf);
}
}

/**
* irdma_request_reset - Request a reset
* @rf: RDMA PCI function
*/
static void irdma_request_reset(struct irdma_pci_f *rf)
static int ig3rdma_vport_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *id)
{
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
}
struct iidc_rdma_vport_auxiliary_dev *idc_adev =
container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
struct auxiliary_device *aux_core_dev = idc_adev->vdev_info->core_adev;
struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_core_dev);
struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
int err;

/**
* irdma_lan_register_qset - Register qset with LAN driver
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
int ret;

cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
ret = ice_add_rdma_qset(cdev_info, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return ret;
}

tc_node->l2_sched_node_id = qset.teid;
vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;

return 0;
}

/**
* irdma_lan_unregister_qset - Unregister qset with LAN driver
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};

cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
qset.teid = tc_node->l2_sched_node_id;

if (ice_del_rdma_qset(cdev_info, &qset))
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}

static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;

rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
GFP_KERNEL);
if (!rf->msix_entries)
return -ENOMEM;

for (i = 0; i < rf->msix_count; i++)
if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
break;

if (i < IRDMA_MIN_MSIX) {
while (--i >= 0)
ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);

kfree(rf->msix_entries);
return -ENOMEM;
}

rf->msix_count = i;

return 0;
}
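
/*
 * Editor's note (illustrative sketch, not part of this patch): the
 * allocate-as-many-as-possible-then-unwind pattern used by
 * irdma_init_interrupts() above, reduced to standalone C. alloc_vector()
 * and free_vector() are hypothetical stand-ins for
 * ice_alloc_rdma_qvector()/ice_free_rdma_qvector().
 */
#include <stdlib.h>

#define MIN_VECS 2

static int alloc_vector(int *v) { *v = 1; return 0; } /* stub */
static void free_vector(int *v) { *v = 0; }           /* stub */

static int init_vectors(int want, int **vecs_out, int *got_out)
{
	int *vecs = calloc(want, sizeof(*vecs));
	int i;

	if (!vecs)
		return -1;

	/* Take as many vectors as the provider grants; stop on failure. */
	for (i = 0; i < want; i++)
		if (alloc_vector(&vecs[i]))
			break;

	/* Below the usable minimum: release everything taken and fail. */
	if (i < MIN_VECS) {
		while (--i >= 0)
			free_vector(&vecs[i]);
		free(vecs);
		return -1;
	}

	*vecs_out = vecs;
	*got_out = i; /* run with however many were actually granted */
	return 0;
}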

static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;

for (i = 0; i < rf->msix_count; i++)
ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);

kfree(rf->msix_entries);
}

static void irdma_remove(struct auxiliary_device *aux_dev)
{
struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
struct iidc_rdma_core_auxiliary_dev *iidc_adev;
struct iidc_rdma_core_dev_info *cdev_info;

iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
cdev_info = iidc_adev->cdev_info;

ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
irdma_ib_unregister_device(iwdev);
irdma_deinit_interrupts(iwdev->rf, cdev_info);

kfree(iwdev->rf);

pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
}

static void irdma_fill_device_info(struct irdma_device *iwdev,
struct iidc_rdma_core_dev_info *cdev_info)
{
struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
struct irdma_pci_f *rf = iwdev->rf;

rf->sc_dev.hw = &rf->hw;
rf->iwdev = iwdev;
rf->cdev = cdev_info;
rf->hw.hw_addr = iidc_priv->hw_addr;
rf->pcidev = cdev_info->pdev;
rf->hw.device = &rf->pcidev->dev;
rf->pf_id = iidc_priv->pf_id;
rf->gen_ops.register_qset = irdma_lan_register_qset;
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;

rf->default_vsi.vsi_idx = iidc_priv->vport_id;
rf->protocol_used =
cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = irdma_request_reset;
rf->limits_sel = 7;
rf->iwdev = iwdev;

mutex_init(&iwdev->ah_tbl_lock);

iwdev->netdev = iidc_priv->netdev;
iwdev->vsi_num = iidc_priv->vport_id;
iwdev = ib_alloc_device(irdma_device, ibdev);
/* Fill iwdev info */
iwdev->is_vport = true;
iwdev->rf = rf;
iwdev->vport_id = idc_adev->vdev_info->vport_id;
iwdev->netdev = idc_adev->vdev_info->netdev;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
iwdev->roce_mode = true;
}

static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
struct iidc_rdma_core_auxiliary_dev *iidc_adev;
struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_priv_dev_info *iidc_priv;
struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
int err;

iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
cdev_info = iidc_adev->cdev_info;
iidc_priv = cdev_info->iidc_priv;

iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
if (!iwdev->rf) {
ib_dealloc_device(&iwdev->ibdev);
return -ENOMEM;
}

irdma_fill_device_info(iwdev, cdev_info);
rf = iwdev->rf;

err = irdma_init_interrupts(rf, cdev_info);
if (err)
goto err_init_interrupts;

err = irdma_ctrl_init_hw(rf);
if (err)
goto err_ctrl_init;
iwdev->roce_mode = true;
iwdev->push_mode = false;

l2params.mtu = iwdev->netdev->mtu;
irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;

err = irdma_rt_init_hw(iwdev, &l2params);
if (err)
@@ -348,43 +103,57 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;

ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);

ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);

return 0;
ibdev_dbg(&iwdev->ibdev,
"INIT: Gen[%d] vport[%d] probe success. dev_name = %s, core_dev_name = %s, netdev=%s\n",
rf->rdma_ver, idc_adev->vdev_info->vport_id,
dev_name(&aux_dev->dev),
dev_name(&idc_adev->vdev_info->core_adev->dev),
netdev_name(idc_adev->vdev_info->netdev));

return 0;
err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
irdma_deinit_interrupts(rf, cdev_info);
err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);

return err;
}

static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
{.name = "ice.iwarp", },
{.name = "ice.roce", },
static void ig3rdma_vport_remove(struct auxiliary_device *aux_dev)
{
struct iidc_rdma_vport_auxiliary_dev *idc_adev =
container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);

ibdev_dbg(&iwdev->ibdev,
"INIT: Gen[%d] dev_name = %s, core_dev_name = %s, netdev=%s\n",
iwdev->rf->rdma_ver, dev_name(&aux_dev->dev),
dev_name(&idc_adev->vdev_info->core_adev->dev),
netdev_name(idc_adev->vdev_info->netdev));

irdma_ib_unregister_device(iwdev);
}

static const struct auxiliary_device_id ig3rdma_vport_auxiliary_id_table[] = {
{.name = "idpf.8086.rdma.vdev", },
{},
};

MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
MODULE_DEVICE_TABLE(auxiliary, ig3rdma_vport_auxiliary_id_table);

static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
static struct iidc_rdma_vport_auxiliary_drv ig3rdma_vport_auxiliary_drv = {
.adrv = {
.id_table = irdma_auxiliary_id_table,
.probe = irdma_probe,
.remove = irdma_remove,
.name = "vdev",
.id_table = ig3rdma_vport_auxiliary_id_table,
.probe = ig3rdma_vport_probe,
.remove = ig3rdma_vport_remove,
},
.event_handler = irdma_iidc_event_handler,
.event_handler = ig3rdma_idc_vport_event_handler,
};


static int __init irdma_init_module(void)
{
int ret;
@@ -396,14 +165,34 @@ static int __init irdma_init_module(void)
return ret;
}

ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
ret = auxiliary_driver_register(&icrdma_core_auxiliary_drv.adrv);
if (ret) {
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
pr_err("Failed icrdma(gen_2) auxiliary_driver_register() ret=%d\n",
ret);
return ret;
}

ret = auxiliary_driver_register(&ig3rdma_core_auxiliary_drv.adrv);
if (ret) {
auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
pr_err("Failed ig3rdma(gen_3) core auxiliary_driver_register() ret=%d\n",
ret);

return ret;
}

ret = auxiliary_driver_register(&ig3rdma_vport_auxiliary_drv.adrv);
if (ret) {
auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
pr_err("Failed ig3rdma vport auxiliary_driver_register() ret=%d\n",
ret);

return ret;
}
irdma_register_notifiers();

return 0;
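
/*
 * Editor's note (illustrative sketch, not part of this patch): the
 * module-init registration stack above in miniature. Each failed
 * registration unregisters everything registered before it, newest
 * first, so module load either fully succeeds or leaves nothing behind.
 * reg_*()/unreg_*() are hypothetical stand-ins for the
 * auxiliary_driver_register() calls.
 */
static int reg_gen1(void) { return 0; }  /* stub */
static int reg_gen2(void) { return 0; }  /* stub */
static int reg_gen3(void) { return -1; } /* stub: simulate failure */
static void unreg_gen1(void) { }
static void unreg_gen2(void) { }

static int register_stack(void)
{
	int ret;

	ret = reg_gen1();
	if (ret)
		return ret;

	ret = reg_gen2();
	if (ret) {
		unreg_gen1();
		return ret;
	}

	ret = reg_gen3();
	if (ret) {
		/* unwind in reverse order of registration */
		unreg_gen2();
		unreg_gen1();
		return ret;
	}

	return 0;
}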
@@ -412,8 +201,10 @@ static int __init irdma_init_module(void)
static void __exit irdma_exit_module(void)
{
irdma_unregister_notifiers();
auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&ig3rdma_vport_auxiliary_drv.adrv);
}

module_init(irdma_init_module);

@@ -30,7 +30,6 @@
#endif
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc_rdma.h>
#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -54,6 +53,8 @@
#include "puda.h"

extern struct auxiliary_driver i40iw_auxiliary_drv;
extern struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv;
extern struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv;

#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
@@ -65,7 +66,8 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2

#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
#define IW_GEN_3_CCQ_SIZE (2 * IRDMA_CQP_SW_SQSIZE_2048 + 2)
#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 2)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048

@@ -127,12 +129,12 @@ enum init_completion_state {
HMC_OBJS_CREATED,
HW_RSRC_INITIALIZED,
CCQ_CREATED,
CEQ0_CREATED, /* Last state of probe */
ILQ_CREATED,
IEQ_CREATED,
CEQ0_CREATED,
CEQS_CREATED,
PBLE_CHUNK_MEM,
AEQ_CREATED,
ILQ_CREATED,
IEQ_CREATED, /* Last state of probe */
IP_ADDR_REGISTERED, /* Last state of open */
};

@@ -167,6 +169,7 @@ struct irdma_cqp_request {
bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool dynamic:1;
bool pending:1;
};

struct irdma_cqp {
@@ -179,6 +182,7 @@ struct irdma_cqp {
struct irdma_dma_mem host_ctx;
u64 *scratch_array;
struct irdma_cqp_request *cqp_requests;
struct irdma_ooo_cqp_op *ooo_op_array;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
@@ -257,6 +261,7 @@ struct irdma_pci_f {
bool reset:1;
bool rsrc_created:1;
bool msix_shared:1;
bool hwqp1_rsvd:1;
u8 rsrc_profile;
u8 *hmc_info_mem;
u8 *mem_rsrc;
@@ -269,6 +274,8 @@ struct irdma_pci_f {
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 max_srq;
u32 next_srq;
u32 max_ah;
u32 next_ah;
u32 max_mcg;
@@ -282,6 +289,7 @@ struct irdma_pci_f {
u32 mr_stagmask;
u32 used_pds;
u32 used_cqs;
u32 used_srqs;
u32 used_mrs;
u32 used_qps;
u32 arp_table_size;
@@ -293,6 +301,7 @@ struct irdma_pci_f {
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_srqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_mcgs;
@@ -327,10 +336,13 @@ struct irdma_pci_f {
wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
struct workqueue_struct *vchnl_wq;
struct irdma_sc_vsi default_vsi;
void *back_fcn;
struct irdma_gen_ops gen_ops;
struct irdma_device *iwdev;
DECLARE_HASHTABLE(ah_hash_tbl, 8);
struct mutex ah_tbl_lock; /* protect AH hash table access */
};

struct irdma_device {
@@ -340,8 +352,6 @@ struct irdma_device {
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
DECLARE_HASHTABLE(ah_hash_tbl, 8);
struct mutex ah_tbl_lock; /* protect AH hash table access */
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
@@ -350,12 +360,14 @@ struct irdma_device {
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
u16 vport_id;
u8 rcv_wscale;
u8 iw_status;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
bool iw_ooo:1;
bool is_vport:1;
enum init_completion_state init_state;

wait_queue_head_t suspend_wq;
@@ -413,6 +425,11 @@ static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
return container_of(dev, struct irdma_pci_f, sc_dev);
}

static inline struct irdma_srq *to_iwsrq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct irdma_srq, ibsrq);
}

/**
* irdma_alloc_resource - allocate a resource
* @iwdev: device pointer
@@ -508,7 +525,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void irdma_cq_add_ref(struct ib_cq *ibcq);
void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);

void irdma_srq_event(struct irdma_sc_srq *srq);
void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info, bool wait);
@@ -557,4 +575,5 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev);
#endif /* IRDMA_MAIN_H */

@@ -193,8 +193,15 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
{
enum irdma_sd_entry_type sd_entry_type;

sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
sd_entry_type = (!idx->rel_pd_idx &&
pages == IRDMA_HMC_PD_CNT_IN_SD) ?
IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
else
sd_entry_type = (!idx->rel_pd_idx &&
pages == IRDMA_HMC_PD_CNT_IN_SD &&
dev->privileged) ?
IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
return sd_entry_type;
}
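
/*
 * Editor's note (illustrative sketch, not part of this patch): the GEN3
 * relaxation coded above, isolated as a pure function. On GEN3+ a DIRECT
 * segment descriptor only requires a full SD's worth of PDs starting at
 * offset zero; earlier generations additionally require a privileged
 * function. Types and names here are simplified stand-ins.
 */
#include <stdbool.h>

enum sd_type { SD_PAGED, SD_DIRECT };

static enum sd_type pick_sd_type(bool gen3_or_later, bool privileged,
				 unsigned int rel_pd_idx,
				 unsigned int pages,
				 unsigned int pd_cnt_in_sd)
{
	bool full_sd = !rel_pd_idx && pages == pd_cnt_in_sd;

	if (gen3_or_later)
		return full_sd ? SD_DIRECT : SD_PAGED;
	return (full_sd && privileged) ? SD_DIRECT : SD_PAGED;
}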

@@ -279,10 +286,11 @@ static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;

if (!sd_entry->valid) {
ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
idx->sd_idx, sd_entry->entry_type, true);
if ((dev->privileged && !sd_entry->valid) ||
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id,
sd_reg_val, idx->sd_idx,
sd_entry->entry_type, true);
if (ret_code)
goto error;
}

@@ -10,6 +10,7 @@
#define ALL_TC2PFC 0xff
#define CQP_COMPL_WAIT_TIME_MS 10
#define CQP_TIMEOUT_THRESHOLD 500
#define CQP_DEF_CMPL_TIMEOUT_THRESHOLD 2500

/* init operations */
int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,

@@ -91,7 +91,7 @@ struct irdma_puda_rsrc_info {
u32 rq_size;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
u8 stats_idx;
u16 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
@@ -140,7 +140,7 @@ struct irdma_puda_rsrc {
u64 crc_err;
u64 pmode_count;
u64 partials_handled;
u8 stats_idx;
u16 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};

@@ -8,6 +8,8 @@
#include "hmc.h"
#include "uda.h"
#include "ws.h"
#include "virtchnl.h"

#define IRDMA_DEBUG_ERR "ERR"
#define IRDMA_DEBUG_INIT "INIT"
#define IRDMA_DEBUG_DEV "DEV"
@@ -95,12 +97,6 @@ enum irdma_term_mpa_errors {
MPA_REQ_RSP = 0x04,
};

enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
IRDMA_QP_EVENT_REQ_ERR,
};

enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
@@ -154,12 +150,46 @@ enum irdma_hw_stats_index {
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,

/* gen3 */
IRDMA_HW_STAT_INDEX_RNR_SENT = 46,
IRDMA_HW_STAT_INDEX_RNR_RCVD = 47,
IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT = 48,
IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT = 49,
IRDMA_HW_STAT_INDEX_RDMARXATS = 50,
IRDMA_HW_STAT_INDEX_RDMATXATS = 51,
IRDMA_HW_STAT_INDEX_NAKSEQERR = 52,
IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED = 53,
IRDMA_HW_STAT_INDEX_RTO = 54,
IRDMA_HW_STAT_INDEX_RXOOOPKTS = 55,
IRDMA_HW_STAT_INDEX_ICRCERR = 56,

IRDMA_HW_STAT_INDEX_MAX_GEN_3 = 57,
};

enum irdma_feature_type {
IRDMA_FEATURE_FW_INFO = 0,
IRDMA_HW_VERSION_INFO = 1,
IRDMA_QP_MAX_INCR = 2,
IRDMA_CQ_MAX_INCR = 3,
IRDMA_CEQ_MAX_INCR = 4,
IRDMA_SD_MAX_INCR = 5,
IRDMA_MR_MAX_INCR = 6,
IRDMA_Q1_MAX_INCR = 7,
IRDMA_AH_MAX_INCR = 8,
IRDMA_SRQ_MAX_INCR = 9,
IRDMA_TIMER_MAX_INCR = 10,
IRDMA_XF_MAX_INCR = 11,
IRDMA_RRF_MAX_INCR = 12,
IRDMA_PBLE_MAX_INCR = 13,
IRDMA_OBJ_1 = 22,
IRDMA_OBJ_2 = 23,
IRDMA_ENDPT_TRK = 24,
IRDMA_FTN_INLINE_MAX = 25,
IRDMA_QSETS_MAX = 26,
IRDMA_ASO = 27,
IRDMA_FTN_FLAGS = 32,
IRDMA_FTN_NOP = 33,
IRDMA_MAX_FEATURES, /* Must be last entry */
};

@@ -206,6 +236,7 @@ enum irdma_syn_rst_handling {
enum irdma_queue_type {
IRDMA_QUEUE_TYPE_SQ_RQ = 0,
IRDMA_QUEUE_TYPE_CQP,
IRDMA_QUEUE_TYPE_SRQ,
};

struct irdma_sc_dev;
@@ -233,12 +264,22 @@ struct irdma_cqp_init_info {
__le64 *host_ctx;
u64 *scratch_array;
u32 sq_size;
struct irdma_ooo_cqp_op *ooo_op_array;
u32 pe_en_vf_cnt;
u16 hw_maj_ver;
u16 hw_min_ver;
u8 struct_ver;
u8 hmc_profile;
u8 ena_vf_count;
u8 ceqs_per_vf;
u8 ooisc_blksize;
u8 rrsp_blksize;
u8 q1_blksize;
u8 xmit_blksize;
u8 ts_override;
u8 ts_shift;
u8 en_fine_grained_timers;
u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -310,9 +351,21 @@ struct irdma_vsi_pestat {
spinlock_t lock; /* rdma stats lock */
};

struct irdma_mmio_region {
u8 __iomem *addr;
resource_size_t len;
resource_size_t offset;
};

struct irdma_hw {
u8 __iomem *hw_addr;
u8 __iomem *priv_hw_addr;
union {
u8 __iomem *hw_addr;
struct {
struct irdma_mmio_region rdma_reg; /* RDMA region */
struct irdma_mmio_region *io_regs; /* Non-RDMA MMIO regions */
u16 num_io_regions; /* Number of Non-RDMA MMIO regions */
};
};
struct device *device;
struct irdma_hmc_info hmc;
};
@@ -351,7 +404,21 @@ struct irdma_cqp_quanta {
__le64 elem[IRDMA_CQP_WQE_SIZE];
};

struct irdma_ooo_cqp_op {
struct list_head list_entry;
u64 scratch;
u32 def_info;
u32 sw_def_info;
u32 wqe_idx;
bool deferred:1;
};

struct irdma_sc_cqp {
spinlock_t ooo_list_lock; /* protects list of pending completions */
struct list_head ooo_avail;
struct list_head ooo_pnd;
u32 last_def_cmpl_ticket;
u32 sw_def_cmpl_ticket;
u32 size;
u64 sq_pa;
u64 host_ctx_pa;
@@ -367,8 +434,10 @@ struct irdma_sc_cqp {
u64 *scratch_array;
u64 requested_ops;
atomic64_t completed_ops;
struct irdma_ooo_cqp_op *ooo_op_array;
u32 cqp_id;
u32 sq_size;
u32 pe_en_vf_cnt;
u32 hw_sq_size;
u16 hw_maj_ver;
u16 hw_min_ver;
@@ -378,6 +447,14 @@ struct irdma_sc_cqp {
u8 ena_vf_count;
u8 timeout_count;
u8 ceqs_per_vf;
u8 ooisc_blksize;
u8 rrsp_blksize;
u8 q1_blksize;
u8 xmit_blksize;
u8 ts_override;
u8 ts_shift;
u8 en_fine_grained_timers;
u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -397,6 +474,8 @@ struct irdma_sc_aeq {
u32 msix_idx;
u8 polarity;
bool virtual_map:1;
bool pasid_valid:1;
u32 pasid;
};

struct irdma_sc_ceq {
@@ -412,13 +491,15 @@ struct irdma_sc_ceq {
u8 tph_val;
u32 first_pm_pbl_idx;
u8 polarity;
struct irdma_sc_vsi *vsi;
u16 vsi_idx;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_size;
spinlock_t req_cq_lock; /* protect access to reg_cq array */
bool virtual_map:1;
bool tph_en:1;
bool itr_no_expire:1;
bool pasid_valid:1;
u32 pasid;
};

struct irdma_sc_cq {
@@ -426,6 +507,7 @@ struct irdma_sc_cq {
u64 cq_pa;
u64 shadow_area_pa;
struct irdma_sc_dev *dev;
u16 vsi_idx;
struct irdma_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
@@ -477,8 +559,13 @@ struct irdma_sc_qp {
bool virtual_map:1;
bool flush_sq:1;
bool flush_rq:1;
bool err_sq_idx_valid:1;
bool err_rq_idx_valid:1;
u32 err_sq_idx;
u32 err_rq_idx;
bool sq_flush_code:1;
bool rq_flush_code:1;
u32 pkt_limit;
enum irdma_flush_opcode flush_code;
enum irdma_qp_event_type event_type;
u8 term_flags;
@@ -489,13 +576,13 @@ struct irdma_sc_qp {
struct irdma_stats_inst_info {
bool use_hmc_fcn_index;
u8 hmc_fn_id;
u8 stats_idx;
u16 stats_idx;
};

struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
u8 hmc_fcn_idx;
u16 hmc_fcn_idx;
bool use_vlan:1;
bool use_cnp_up_override:1;
};
@@ -518,6 +605,8 @@ struct irdma_ws_node_info {
struct irdma_hmc_fpm_misc {
u32 max_ceqs;
u32 max_sds;
u32 loc_mem_pages;
u8 ird;
u32 xf_block_size;
u32 q1_block_size;
u32 ht_multiplier;
@@ -526,6 +615,7 @@ struct irdma_hmc_fpm_misc {
u32 ooiscf_block_size;
};

#define IRDMA_VCHNL_MAX_MSG_SIZE 512
#define IRDMA_LEAF_DEFAULT_REL_BW 64
#define IRDMA_PARENT_DEFAULT_REL_BW 1

@@ -601,19 +691,28 @@ struct irdma_sc_dev {
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
struct irdma_hw_attrs hw_attrs;
struct irdma_hmc_info *hmc_info;
struct irdma_vchnl_rdma_caps vc_caps;
u8 vc_recv_buf[IRDMA_VCHNL_MAX_MSG_SIZE];
u16 vc_recv_len;
struct irdma_sc_cqp *cqp;
struct irdma_sc_aeq *aeq;
struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
struct irdma_sc_cq *ccq;
const struct irdma_irq_ops *irq_ops;
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_hmc_fpm_misc hmc_fpm_misc;
struct irdma_ws_node *ws_tree_root;
struct mutex ws_mutex; /* ws tree mutex */
u32 vchnl_ver;
u16 num_vfs;
u8 hmc_fn_id;
u16 hmc_fn_id;
u8 vf_id;
bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;
bool is_pf:1;
u8 protocol_used;
struct mutex vchnl_mutex; /* mutex to synchronize RDMA virtual channel messages */
u8 pci_rev;
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
@@ -632,6 +731,51 @@ struct irdma_modify_cq_info {
bool cq_resize:1;
};

struct irdma_srq_init_info {
struct irdma_sc_pd *pd;
struct irdma_sc_vsi *vsi;
u64 srq_pa;
u64 shadow_area_pa;
u32 first_pm_pbl_idx;
u32 pasid;
u32 srq_size;
u16 srq_limit;
u8 pasid_valid;
u8 wqe_size;
u8 leaf_pbl_size;
u8 virtual_map;
u8 tph_en;
u8 arm_limit_event;
u8 tph_value;
u8 pbl_chunk_size;
struct irdma_srq_uk_init_info srq_uk_init_info;
};

struct irdma_sc_srq {
struct irdma_sc_dev *dev;
struct irdma_sc_vsi *vsi;
struct irdma_sc_pd *pd;
struct irdma_srq_uk srq_uk;
void *back_srq;
u64 srq_pa;
u64 shadow_area_pa;
u32 first_pm_pbl_idx;
u32 pasid;
u32 hw_srq_size;
u16 srq_limit;
u8 pasid_valid;
u8 leaf_pbl_size;
u8 virtual_map;
u8 tph_en;
u8 arm_limit_event;
u8 tph_val;
};

struct irdma_modify_srq_info {
u16 srq_limit;
u8 arm_limit_event;
};

struct irdma_create_qp_info {
bool ord_valid:1;
bool tcp_ctx_valid:1;
@@ -671,7 +815,8 @@ struct irdma_ccq_cqe_info {
u16 maj_err_code;
u16 min_err_code;
u8 op_code;
bool error;
bool error:1;
bool pending:1;
};

struct irdma_dcb_app_info {
@@ -720,7 +865,7 @@ struct irdma_vsi_init_info {

struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
u8 fcn_id;
u16 fcn_id;
bool alloc_stats_inst;
};

@@ -731,7 +876,8 @@ struct irdma_device_init_info {
__le64 *fpm_commit_buf;
struct irdma_hw *hw;
void __iomem *bar0;
u8 hmc_fn_id;
enum irdma_protocol_used protocol_used;
u16 hmc_fn_id;
};

struct irdma_ceq_init_info {
@@ -746,8 +892,8 @@ struct irdma_ceq_init_info {
bool itr_no_expire:1;
u8 pbl_chunk_size;
u8 tph_val;
u16 vsi_idx;
u32 first_pm_pbl_idx;
struct irdma_sc_vsi *vsi;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_idx;
};
@@ -807,6 +953,8 @@ struct irdma_udp_offload_info {
u32 cwnd;
u8 rexmit_thresh;
u8 rnr_nak_thresh;
u8 rnr_nak_tmr;
u8 min_rnr_timer;
};

struct irdma_roce_offload_info {
@@ -833,6 +981,7 @@ struct irdma_roce_offload_info {
bool dctcp_en:1;
bool fw_cc_enable:1;
bool use_stats_inst:1;
u8 local_ack_timeout;
u16 t_high;
u16 t_low;
u8 last_byte_sent;
@@ -933,8 +1082,10 @@ struct irdma_qp_host_ctx_info {
};
u32 send_cq_num;
u32 rcv_cq_num;
u32 srq_id;
u32 rem_endpoint_idx;
u8 stats_idx;
u16 stats_idx;
bool remote_atomics_en:1;
bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
@@ -945,6 +1096,7 @@ struct irdma_qp_host_ctx_info {
struct irdma_aeqe_info {
u64 compl_ctx;
u32 qp_cq_id;
u32 def_info; /* only valid for DEF_CMPL */
u16 ae_id;
u16 wqe_idx;
u8 tcp_state;
@@ -953,9 +1105,11 @@ struct irdma_aeqe_info {
bool cq:1;
bool sq:1;
bool rq:1;
bool srq:1;
bool in_rdrsp_wr:1;
bool out_rdrsp:1;
bool aeqe_overflow:1;
bool err_rq_idx_valid:1;
u8 q2_data_written;
u8 ae_src;
};
@@ -972,7 +1126,8 @@ struct irdma_allocate_stag_info {
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
bool all_memory:1;
u8 hmc_fcn_index;
bool remote_atomics_en:1;
u16 hmc_fcn_index;
};

struct irdma_mw_alloc_info {
@@ -1000,6 +1155,7 @@ struct irdma_reg_ns_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool all_memory:1;
bool remote_atomics_en:1;
};

struct irdma_fast_reg_stag_info {
@@ -1023,6 +1179,7 @@ struct irdma_fast_reg_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool defer_flag:1;
bool remote_atomics_en:1;
};

struct irdma_dealloc_stag_info {
@@ -1130,6 +1287,8 @@ struct irdma_cqp_manage_push_page_info {
};

struct irdma_qp_flush_info {
u32 err_sq_idx;
u32 err_rq_idx;
u16 sq_minor_code;
u16 sq_major_code;
u16 rq_minor_code;
@@ -1140,6 +1299,8 @@ struct irdma_qp_flush_info {
bool rq:1;
bool userflushcode:1;
bool generate_ae:1;
bool err_sq_idx_valid:1;
bool err_rq_idx_valid:1;
};

struct irdma_gen_ae_info {
@@ -1189,6 +1350,11 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
struct irdma_aeqe_info *info,
bool first, u64 *scratch,
u32 *sw_def_info);
u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
@@ -1224,6 +1390,8 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
bool poll_registers);
int irdma_sc_srq_init(struct irdma_sc_srq *srq,
struct irdma_srq_init_info *info);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info {
@@ -1467,6 +1635,23 @@ struct cqp_info {
struct irdma_dma_mem query_buff_mem;
u64 scratch;
} query_rdma;

struct {
struct irdma_sc_srq *srq;
u64 scratch;
} srq_create;

struct {
struct irdma_sc_srq *srq;
struct irdma_modify_srq_info info;
u64 scratch;
} srq_modify;

struct {
struct irdma_sc_srq *srq;
u64 scratch;
} srq_destroy;

} u;
};


@@ -78,8 +78,7 @@
#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)

#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(27, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
@@ -94,7 +93,7 @@
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(23, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)

@@ -198,6 +198,26 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
return wqe;
}

__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
{
int ret_code;
__le64 *wqe;

if (IRDMA_RING_FULL_ERR(srq->srq_ring))
return NULL;

IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
if (ret_code)
return NULL;

if (!*wqe_idx)
srq->srwqe_polarity = !srq->srwqe_polarity;
/* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;

return wqe;
}

/**
* irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
* @qp: hw qp ptr
@@ -317,6 +337,160 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
return 0;
}

/**
* irdma_uk_atomic_fetch_add - atomic fetch and add operation
* @qp: hw qp ptr
* @info: post sq information
* @post_sq: flag to post sq
*/
int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
struct irdma_atomic_fetch_add *op_info;
u32 total_size = 0;
u16 quanta = 2;
u32 wqe_idx;
__le64 *wqe;
u64 hdr;

op_info = &info->op.atomic_fetch_add;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
return -ENOMEM;

set_64bit_val(wqe, 0, op_info->tagged_offset);
set_64bit_val(wqe, 8,
FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
set_64bit_val(wqe, 16, op_info->remote_tagged_offset);

hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
set_64bit_val(wqe, 40, 0);
set_64bit_val(wqe, 48, 0);
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));

dma_wmb(); /* make sure WQE is populated before valid bit is set */

set_64bit_val(wqe, 24, hdr);

if (post_sq)
irdma_uk_qp_post_wr(qp);

return 0;
}
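
/*
 * Editor's note (illustrative usage sketch, not part of this patch):
 * how a caller inside this driver might fill irdma_post_sq_info for the
 * fetch-and-add verb above. Field values are placeholders; a real
 * caller derives them from the incoming work request.
 */
static int post_fetch_add_sketch(struct irdma_qp_uk *qp, u64 wr_id,
				 u64 local_va, u32 local_stag,
				 u64 remote_va, u32 remote_stag,
				 u64 add_value)
{
	struct irdma_post_sq_info info = {};

	info.wr_id = wr_id;
	info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
	info.signaled = true;
	info.op.atomic_fetch_add.tagged_offset = local_va;
	info.op.atomic_fetch_add.stag = local_stag;
	info.op.atomic_fetch_add.remote_tagged_offset = remote_va;
	info.op.atomic_fetch_add.remote_stag = remote_stag;
	info.op.atomic_fetch_add.fetch_add_data_bytes = add_value;

	/* build the WQE and ring the doorbell immediately */
	return irdma_uk_atomic_fetch_add(qp, &info, true);
}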

/**
* irdma_uk_atomic_compare_swap - atomic compare and swap operation
* @qp: hw qp ptr
* @info: post sq information
* @post_sq: flag to post sq
*/
int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
struct irdma_atomic_compare_swap *op_info;
u32 total_size = 0;
u16 quanta = 2;
u32 wqe_idx;
__le64 *wqe;
u64 hdr;

op_info = &info->op.atomic_compare_swap;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
return -ENOMEM;

set_64bit_val(wqe, 0, op_info->tagged_offset);
set_64bit_val(wqe, 8,
FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
set_64bit_val(wqe, 16, op_info->remote_tagged_offset);

hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

set_64bit_val(wqe, 32, op_info->swap_data_bytes);
set_64bit_val(wqe, 40, op_info->compare_data_bytes);
set_64bit_val(wqe, 48, 0);
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));

dma_wmb(); /* make sure WQE is populated before valid bit is set */

set_64bit_val(wqe, 24, hdr);

if (post_sq)
irdma_uk_qp_post_wr(qp);

return 0;
}

/**
* irdma_uk_srq_post_receive - post a receive wqe to a shared rq
* @srq: shared rq ptr
* @info: post rq information
*/
int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
struct irdma_post_rq_info *info)
{
u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
__le64 *wqe;
u64 hdr;

if (srq->max_srq_frag_cnt < info->num_sges)
return -EINVAL;

wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
if (!wqe)
return -ENOMEM;

addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
srq->srwqe_polarity);

for (i = 1, byte_off = 32; i < info->num_sges; i++) {
srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
srq->srwqe_polarity);
byte_off += 16;
}

/* if not an odd number set valid bit in next fragment */
if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
info->num_sges) {
srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
srq->srwqe_polarity);
if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
++addl_frag_cnt;
}

set_64bit_val(wqe, 16, (u64)info->wr_id);
hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);

dma_wmb(); /* make sure WQE is populated before valid bit is set */

set_64bit_val(wqe, 24, hdr);

set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);

return 0;
}
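
/*
 * Editor's note (illustrative usage sketch, not part of this patch):
 * posting a single-SGE receive through the helper above. The SGE type
 * is whatever irdma_post_rq_info::sg_list is declared as in this
 * driver's headers; everything else here is a placeholder.
 */
static int srq_post_one_sketch(struct irdma_srq_uk *srq, u64 wr_id,
			       struct ib_sge *sge)
{
	struct irdma_post_rq_info info = {};

	info.wr_id = wr_id;   /* returned in the CQE for this receive */
	info.sg_list = sge;
	info.num_sges = 1;

	return irdma_uk_srq_post_receive(srq, &info);
}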

/**
* irdma_uk_rdma_read - rdma read command
* @qp: hw qp ptr
@@ -973,6 +1147,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
struct irdma_srq_uk *srq;
struct qp_err_code qp_err;
u8 is_srq;
struct irdma_ring *pring = NULL;
u32 wqe_idx;
int ret_code;
@@ -1046,21 +1223,46 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}

info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, 8, &comp_ctx);
if (is_srq)
get_64bit_val(cqe, 40, (u64 *)&qp);
else
qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
switch (info->major_err) {
case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
qp_err = irdma_ae_to_qp_err_code(info->minor_err);
info->minor_err = qp_err.flush_code;
fallthrough;
case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
qword3 &= ~IRDMA_CQ_MINERR;
qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, 24, qword3);
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
break;
default:
#define IRDMA_CIE_SIGNATURE 0xE
#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
== IRDMA_CIE_SIGNATURE) {
info->error = 0;
info->major_err = 0;
info->minor_err = 0;
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
} else {
info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
}
break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1069,7 +1271,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);

info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

@@ -1085,7 +1286,22 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
srq = qp->srq_uk;

get_64bit_val(cqe, 8, &info->wr_id);
info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

if (qword3 & IRDMACQ_STAG) {
info->stag_invalid_set = true;
info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
qword2);
} else {
info->stag_invalid_set = false;
}
IRDMA_RING_MOVE_TAIL(srq->srq_ring);
pring = &srq->srq_ring;
} else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
u32 array_idx;

array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
@@ -1180,9 +1396,15 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
ret_code = 0;

exit:
if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
move_cq_head = false;
/* Park CQ head during a flush to generate additional CQEs
* from SW for all unprocessed WQEs. For GEN3 and beyond
* FW will generate/flush these CQEs so move to the next CQE
*/
move_cq_head = qp->uk_attrs->hw_rev <= IRDMA_GEN_2 ?
false : true;
}
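
/*
 * Editor's note (illustrative restatement, not part of this patch): the
 * flush-time head decision above as a one-line predicate. GEN2 and
 * older park the CQ head so software can synthesize flush CQEs for the
 * remaining WQEs; on GEN3+ firmware emits them, so polling advances.
 */
static bool advance_cq_head_on_flush(u8 hw_rev)
{
	return hw_rev > IRDMA_GEN_2; /* GEN3+: FW generates the flush CQEs */
}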

if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1210,10 +1432,10 @@ exit:
}

/**
* irdma_qp_round_up - return round up qp wq depth
* irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
static int irdma_qp_round_up(u32 wqdepth)
static int irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;

@@ -1268,7 +1490,7 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

if (*sqdepth < min_size)
*sqdepth = min_size;
@@ -1290,7 +1512,7 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

if (*rqdepth < min_size)
*rqdepth = min_size;
@@ -1300,6 +1522,26 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
return 0;
}

/*
* irdma_get_srqdepth - get SRQ depth (quanta)
* @uk_attrs: qp HW attributes
* @srq_size: SRQ size
* @shift: shift which determines size of WQE
* @srqdepth: depth of SRQ
*/
int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
u32 *srqdepth)
{
*srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);

if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
*srqdepth = uk_attrs->min_hw_wq_size << shift;
else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
return -EINVAL;

return 0;
}
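
/*
 * Editor's note (illustrative arithmetic, not part of this patch):
 * irdma_round_up_wq() rounds a quanta count up to the next power of
 * two. Assuming, purely for the example, IRDMA_RQ_RSVD were 6: an SRQ
 * of 100 WQEs with shift 1 (two quanta per WQE) needs
 * (100 << 1) + 6 = 206 quanta, which rounds up to 256 before the
 * min/max clamps above are applied.
 */
#include <stdint.h>

/* Round v up to the next power of two (the effect of irdma_round_up_wq). */
static uint32_t roundup_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p; /* roundup_pow2(206) == 256 */
}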

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
.iw_copy_inline_data = irdma_copy_inline_data,
.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
@@ -1335,6 +1577,42 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
* irdma_uk_srq_init - initialize shared qp
* @srq: hw srq (user and kernel)
* @info: srq initialization info
*
* Initializes the vars used in both user and kernel mode.
* The size of the wqe depends on number of max fragments
* allowed. Then size of wqe * the number of wqes should be the
* amount of memory allocated for srq.
*/
int irdma_uk_srq_init(struct irdma_srq_uk *srq,
struct irdma_srq_uk_init_info *info)
{
u8 rqshift;

srq->uk_attrs = info->uk_attrs;
if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
return -EINVAL;

irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
srq->srq_caps = info->srq_caps;
srq->srq_base = info->srq;
srq->shadow_area = info->shadow_area;
srq->srq_id = info->srq_id;
srq->srwqe_polarity = 0;
srq->srq_size = info->srq_size;
srq->wqe_size = rqshift;
srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
((u32)2 << rqshift) - 1);
IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
srq->wqe_size_multiplier = 1 << rqshift;
srq->wqe_ops = iw_wqe_uk_ops;

return 0;
}
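
/*
 * Editor's note (illustrative arithmetic, not part of this patch): the
 * fragment ceiling recomputed above, isolated. With rqshift = 2 each
 * SRQ WQE spans 1 << 2 = 4 quanta of 32 bytes, and the per-WQE SGE
 * limit becomes min(max_hw_wq_frags, (2u << 2) - 1) = min(..., 7).
 */
static unsigned int srq_frag_ceiling(unsigned int max_hw_wq_frags,
				     unsigned int rqshift)
{
	unsigned int by_shift = (2u << rqshift) - 1; /* rqshift 2 -> 7 */

	return max_hw_wq_frags < by_shift ? max_hw_wq_frags : by_shift;
}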

/**
* irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
* @ukinfo: qp initialization info
@@ -1461,6 +1739,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
qp->srq_uk = info->srq_uk;
return ret_code;
}


@@ -41,10 +41,114 @@
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
#define IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD 0x0f
#define IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP 0x11
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f

#define IRDMA_FLUSH_MAJOR_ERR 1
#define IRDMA_FLUSH_MAJOR_ERR 1
#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe

/* Async Events codes */
#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
#define IRDMA_AE_AMP_INVALID_STAG 0x0103
#define IRDMA_AE_AMP_BAD_QP 0x0104
#define IRDMA_AE_AMP_BAD_PD 0x0105
#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
#define IRDMA_AE_AMP_TO_WRAP 0x010a
#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
#define IRDMA_AE_BAD_CLOSE 0x0201
#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
#define IRDMA_AE_SRQ_LIMIT 0x0209
#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_SRQ_CATASTROPHIC_ERROR 0x020f
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
#define IRDMA_AE_ATOMIC_ALIGNMENT 0x0221
#define IRDMA_AE_ATOMIC_MASK 0x0222
#define IRDMA_AE_INVALID_REQUEST 0x0223
#define IRDMA_AE_PCIE_ATOMIC_DISABLE 0x0224
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
#define IRDMA_AE_DDP_NO_L_BIT 0x0308
#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
#define IRDMA_AE_RESET_NOT_SENT 0x0603
#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
#define IRDMA_AE_REMOTE_QP_CATASTROPHIC 0x0703
#define IRDMA_AE_LOCAL_QP_CATASTROPHIC 0x0704
#define IRDMA_AE_RCE_QP_CATASTROPHIC 0x0705
#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
#define IRDMA_AE_CQP_DEFERRED_COMPLETE 0x0901
#define IRDMA_AE_ADAPTER_CATASTROPHIC 0x0B0B

enum irdma_device_caps_const {
|
||||
IRDMA_WQE_SIZE = 4,
|
||||
@@ -55,11 +159,12 @@ enum irdma_device_caps_const {
|
||||
IRDMA_CEQE_SIZE = 1,
|
||||
IRDMA_CQP_CTX_SIZE = 8,
|
||||
IRDMA_SHADOW_AREA_SIZE = 8,
|
||||
IRDMA_QUERY_FPM_BUF_SIZE = 176,
|
||||
IRDMA_COMMIT_FPM_BUF_SIZE = 176,
|
||||
IRDMA_QUERY_FPM_BUF_SIZE = 192,
|
||||
IRDMA_COMMIT_FPM_BUF_SIZE = 192,
|
||||
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
|
||||
IRDMA_MIN_IW_QP_ID = 0,
|
||||
IRDMA_MAX_IW_QP_ID = 262143,
|
||||
IRDMA_MIN_IW_SRQ_ID = 0,
|
||||
IRDMA_MIN_CEQID = 0,
|
||||
IRDMA_MAX_CEQID = 1023,
|
||||
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
|
||||
@@ -67,6 +172,7 @@ enum irdma_device_caps_const {
|
||||
IRDMA_MAX_CQID = 524287,
|
||||
IRDMA_MIN_AEQ_ENTRIES = 1,
|
||||
IRDMA_MAX_AEQ_ENTRIES = 524287,
|
||||
IRDMA_MAX_AEQ_ENTRIES_GEN_3 = 262144,
|
||||
IRDMA_MIN_CEQ_ENTRIES = 1,
|
||||
IRDMA_MAX_CEQ_ENTRIES = 262143,
|
||||
IRDMA_MIN_CQ_SIZE = 1,
|
||||
@@ -105,6 +211,13 @@ enum irdma_flush_opcode {
|
||||
FLUSH_RETRY_EXC_ERR,
|
||||
FLUSH_MW_BIND_ERR,
|
||||
FLUSH_REM_INV_REQ_ERR,
|
||||
FLUSH_RNR_RETRY_EXC_ERR,
|
||||
};
|
||||
|
||||
enum irdma_qp_event_type {
|
||||
IRDMA_QP_EVENT_CATASTROPHIC,
|
||||
IRDMA_QP_EVENT_ACCESS_ERR,
|
||||
IRDMA_QP_EVENT_REQ_ERR,
|
||||
};
|
||||
|
||||
enum irdma_cmpl_status {
|
||||
@@ -147,6 +260,8 @@ enum irdma_qp_caps {
|
||||
IRDMA_PUSH_MODE = 8,
|
||||
};
|
||||
|
||||
struct irdma_srq_uk;
|
||||
struct irdma_srq_uk_init_info;
|
||||
struct irdma_qp_uk;
|
||||
struct irdma_cq_uk;
|
||||
struct irdma_qp_uk_init_info;
|
||||
@@ -201,6 +316,24 @@ struct irdma_bind_window {
|
||||
bool ena_writes:1;
|
||||
irdma_stag mw_stag;
|
||||
bool mem_window_type_1:1;
|
||||
bool remote_atomics_en:1;
|
||||
};
|
||||
|
||||
struct irdma_atomic_fetch_add {
|
||||
u64 tagged_offset;
|
||||
u64 remote_tagged_offset;
|
||||
u64 fetch_add_data_bytes;
|
||||
u32 stag;
|
||||
u32 remote_stag;
|
||||
};
|
||||
|
||||
struct irdma_atomic_compare_swap {
|
||||
u64 tagged_offset;
|
||||
u64 remote_tagged_offset;
|
||||
u64 swap_data_bytes;
|
||||
u64 compare_data_bytes;
|
||||
u32 stag;
|
||||
u32 remote_stag;
|
||||
};
|
||||
|
||||
struct irdma_inv_local_stag {
|
||||
@@ -219,6 +352,7 @@ struct irdma_post_sq_info {
|
||||
bool report_rtt:1;
|
||||
bool udp_hdr:1;
|
||||
bool defer_flag:1;
|
||||
bool remote_atomic_en:1;
|
||||
u32 imm_data;
|
||||
u32 stag_to_inv;
|
||||
union {
|
||||
@@ -227,6 +361,8 @@ struct irdma_post_sq_info {
|
||||
struct irdma_rdma_read rdma_read;
|
||||
struct irdma_bind_window bind_window;
|
||||
struct irdma_inv_local_stag inv_local_stag;
|
||||
struct irdma_atomic_fetch_add atomic_fetch_add;
|
||||
struct irdma_atomic_compare_swap atomic_compare_swap;
|
||||
} op;
|
||||
};
|
||||
|
||||
@@ -255,6 +391,15 @@ struct irdma_cq_poll_info {
        bool imm_valid:1;
};

struct qp_err_code {
        enum irdma_flush_opcode flush_code;
        enum irdma_qp_event_type event_type;
};

int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
        struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
        struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
        struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
@@ -300,6 +445,39 @@ int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
        u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
        u32 *rq_depth, u8 *rq_shift);
int irdma_uk_srq_init(struct irdma_srq_uk *srq,
        struct irdma_srq_uk_init_info *info);
int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
        struct irdma_post_rq_info *info);

struct irdma_srq_uk {
        u32 srq_caps;
        struct irdma_qp_quanta *srq_base;
        struct irdma_uk_attrs *uk_attrs;
        __le64 *shadow_area;
        struct irdma_ring srq_ring;
        struct irdma_ring initial_ring;
        u32 srq_id;
        u32 srq_size;
        u32 max_srq_frag_cnt;
        struct irdma_wqe_uk_ops wqe_ops;
        u8 srwqe_polarity;
        u8 wqe_size;
        u8 wqe_size_multiplier;
        u8 deferred_flag;
};

struct irdma_srq_uk_init_info {
        struct irdma_qp_quanta *srq;
        struct irdma_uk_attrs *uk_attrs;
        __le64 *shadow_area;
        u64 *srq_wrid_array;
        u32 srq_id;
        u32 srq_caps;
        u32 srq_size;
        u32 max_srq_frag_cnt;
};

struct irdma_sq_uk_wr_trk_info {
        u64 wrid;
        u32 wr_len;
@@ -344,6 +522,7 @@ struct irdma_qp_uk {
        bool destroy_pending:1; /* Indicates the QP is being destroyed */
        void *back_qp;
        u8 dbg_rq_flushed;
        struct irdma_srq_uk *srq_uk;
        u8 sq_flush_seen;
        u8 rq_flush_seen;
};
@@ -383,6 +562,7 @@ struct irdma_qp_uk_init_info {
        u8 rq_shift;
        int abi_ver;
        bool legacy_mode;
        struct irdma_srq_uk *srq_uk;
};

struct irdma_cq_uk_init_info {
@@ -398,6 +578,7 @@ struct irdma_cq_uk_init_info {
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
        u16 quanta, u32 total_size,
        struct irdma_post_sq_info *info);
__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
@@ -409,5 +590,85 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
        u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
        u32 *wqdepth);
int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
        u32 *srqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
{
        struct qp_err_code qp_err = {};

        switch (ae_id) {
        case IRDMA_AE_AMP_BOUNDS_VIOLATION:
        case IRDMA_AE_AMP_INVALID_STAG:
        case IRDMA_AE_AMP_RIGHTS_VIOLATION:
        case IRDMA_AE_AMP_UNALLOCATED_STAG:
        case IRDMA_AE_AMP_BAD_PD:
        case IRDMA_AE_AMP_BAD_QP:
        case IRDMA_AE_AMP_BAD_STAG_KEY:
        case IRDMA_AE_AMP_BAD_STAG_INDEX:
        case IRDMA_AE_AMP_TO_WRAP:
        case IRDMA_AE_PRIV_OPERATION_DENIED:
                qp_err.flush_code = FLUSH_PROT_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
        case IRDMA_AE_UDA_XMIT_BAD_PD:
        case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
                qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
        case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
        case IRDMA_AE_UDA_L4LEN_INVALID:
        case IRDMA_AE_DDP_UBE_INVALID_MO:
        case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
                qp_err.flush_code = FLUSH_LOC_LEN_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
        case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
                qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
        case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
        case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
        case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
        case IRDMA_AE_AMP_MWBIND_VALID_STAG:
                qp_err.flush_code = FLUSH_MW_BIND_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
        case IRDMA_AE_LLP_TOO_MANY_RETRIES:
                qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        case IRDMA_AE_IB_INVALID_REQUEST:
                qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
                break;
        case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
        case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
        case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
        case IRDMA_AE_IB_REMOTE_OP_ERROR:
                qp_err.flush_code = FLUSH_REM_OP_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        case IRDMA_AE_LLP_TOO_MANY_RNRS:
                qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        case IRDMA_AE_LCE_QP_CATASTROPHIC:
        case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
        case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
        case IRDMA_AE_RCE_QP_CATASTROPHIC:
                qp_err.flush_code = FLUSH_FATAL_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        default:
                qp_err.flush_code = FLUSH_GENERAL_ERR;
                qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
                break;
        }

        return qp_err;
}
#endif /* IRDMA_USER_H */
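For orientation, a hedged sketch of the intended call site (assumed; the AEQ handler itself is not part of this hunk): the helper turns a raw asynchronous event id into the flush opcode and QP event the rest of the driver acts on.

        static void example_handle_qp_ae(struct irdma_qp_uk *qp, u16 ae_id)
        {
                struct qp_err_code err = irdma_ae_to_qp_err_code(ae_id);

                /* event_type picks the async event surfaced to the consumer */
                if (err.event_type == IRDMA_QP_EVENT_CATASTROPHIC)
                        pr_err("QP %u: fatal AE 0x%04x, flush code %d\n",
                               qp->qp_id, ae_id, err.flush_code);
                /* err.flush_code then becomes the status of subsequently flushed WQEs */
        }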
@@ -481,6 +481,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
        WRITE_ONCE(cqp_request->request_done, false);
        cqp_request->callback_fcn = NULL;
        cqp_request->waiting = false;
        cqp_request->pending = false;

        spin_lock_irqsave(&cqp->req_lock, flags);
        list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
@@ -520,6 +521,22 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
        irdma_put_cqp_request(cqp, cqp_request);
}

/**
 * irdma_cleanup_deferred_cqp_ops - clean-up cqp with no completions
 * @dev: sc_dev
 * @cqp: cqp
 */
static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
        struct irdma_cqp *cqp)
{
        u64 scratch;

        /* process all CQP requests with deferred/pending completions */
        while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
                irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
                        (uintptr_t)scratch);
}

/**
 * irdma_cleanup_pending_cqp_op - clean-up cqp with no
 * completions
@@ -533,6 +550,8 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
        struct cqp_cmds_info *pcmdinfo = NULL;
        u32 i, pending_work, wqe_idx;

        if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
                irdma_cleanup_deferred_cqp_ops(dev, cqp);
        pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
        wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
        for (i = 0; i < pending_work; i++) {
@@ -552,6 +571,26 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
        }
}

static int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
{
        u16 time_s = dev->vc_caps.cqp_timeout_s;

        if (!time_s)
                return CQP_TIMEOUT_THRESHOLD;

        return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
}

static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
{
        u16 time_s = dev->vc_caps.cqp_def_timeout_s;

        if (!time_s)
                return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;

        return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
}
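Both helpers return a count of polling iterations rather than a time; a worked example with assumed firmware values:

        /*
         * Assumed values for illustration: cqp_timeout_s = 60 reported by
         * the firmware and max_cqp_compl_wait_time_ms = 100 give
         *
         *      60 * 1000 / 100 = 600
         *
         * no-progress iterations in irdma_wait_event() below before the
         * CQP command is declared timed out; the CQP_*_THRESHOLD constants
         * remain the fallback when the firmware reports zero.
         */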
/**
 * irdma_wait_event - wait for completion
 * @rf: RDMA PCI function
@@ -561,6 +600,7 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
        struct irdma_cqp_request *cqp_request)
{
        struct irdma_cqp_timeout cqp_timeout = {};
        int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
        bool cqp_error = false;
        int err_code = 0;

@@ -572,9 +612,17 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
                        msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
                        break;

                if (cqp_request->pending)
                        /* There was a deferred or pending completion
                         * received for this CQP request, so we need
                         * to wait longer than usual.
                         */
                        timeout_threshold =
                                irdma_get_def_timeout_threshold(&rf->sc_dev);

                irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);

                if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
                if (cqp_timeout.count < timeout_threshold)
                        continue;

                if (!rf->reset) {
@@ -649,6 +697,9 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
        [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
        [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
        [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
        [IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
        [IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
        [IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
};

static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
@@ -1065,6 +1116,26 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
        irdma_put_cqp_request(&rf->cqp, cqp_request);
}

static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
{
        struct irdma_device *iwdev = iwqp->iwdev;
        struct irdma_pci_f *rf = iwdev->rf;
        unsigned long flags;

        if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
                return;

        irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);

        if (qp_num == 1) {
                spin_lock_irqsave(&rf->rsrc_lock, flags);
                rf->hwqp1_rsvd = false;
                spin_unlock_irqrestore(&rf->rsrc_lock, flags);
        } else if (qp_num > 2) {
                irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
        }
}

/**
 * irdma_free_qp_rsrc - free up memory resources for qp
 * @iwqp: qp ptr (user or kernel)
@@ -1073,7 +1144,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
        struct irdma_device *iwdev = iwqp->iwdev;
        struct irdma_pci_f *rf = iwdev->rf;
        u32 qp_num = iwqp->ibqp.qp_num;
        u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;

        irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
        irdma_dealloc_push_page(rf, &iwqp->sc_qp);
@@ -1083,8 +1154,12 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
                        iwqp->sc_qp.user_pri);
        }

        if (qp_num > 2)
                irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
        if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
                irdma_free_gsi_qp_rsrc(iwqp, qp_num);
        } else {
                if (qp_num > 2)
                        irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
        }
        dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
                iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
        iwqp->q2_ctx_mem.va = NULL;
@@ -1095,6 +1170,30 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
        kfree(iwqp->kqp.rq_wrid_mem);
}

/**
 * irdma_srq_wq_destroy - send srq destroy cqp
 * @rf: RDMA PCI function
 * @srq: hardware control srq
 */
void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
{
        struct irdma_cqp_request *cqp_request;
        struct cqp_cmds_info *cqp_info;

        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
        if (!cqp_request)
                return;

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
        cqp_info->post_sq = 1;
        cqp_info->in.u.srq_destroy.srq = srq;
        cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;

        irdma_handle_cqp_op(rf, cqp_request);
        irdma_put_cqp_request(&rf->cqp, cqp_request);
}

/**
 * irdma_cq_wq_destroy - send cq destroy cqp
 * @rf: RDMA PCI function
@@ -2266,7 +2365,10 @@ bool irdma_cq_empty(struct irdma_cq *iwcq)
        u8 polarity;

        ukcq = &iwcq->sc_cq.cq_uk;
        cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
        if (ukcq->avoid_mem_cflct)
                cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
        else
                cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
[File diff suppressed because it is too large]
@@ -8,6 +8,7 @@

#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
#define IRDMA_SHADOW_PGCNT 1

struct irdma_ucontext {
        struct ib_ucontext ibucontext;
@@ -17,6 +18,8 @@ struct irdma_ucontext {
        spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
        struct list_head qp_reg_mem_list;
        spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
        struct list_head srq_reg_mem_list;
        spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
        int abi_ver;
        u8 legacy_mode : 1;
        u8 use_raw_attrs : 1;
@@ -65,10 +68,16 @@ struct irdma_cq_mr {
        bool split;
};

struct irdma_srq_mr {
        struct irdma_hmc_pble srq_pbl;
        dma_addr_t shadow;
};

struct irdma_qp_mr {
        struct irdma_hmc_pble sq_pbl;
        struct irdma_hmc_pble rq_pbl;
        dma_addr_t shadow;
        dma_addr_t rq_pa;
        struct page *sq_page;
};

@@ -85,6 +94,7 @@ struct irdma_pbl {
        union {
                struct irdma_qp_mr qp_mr;
                struct irdma_cq_mr cq_mr;
                struct irdma_srq_mr srq_mr;
        };

        bool pbl_allocated:1;
@@ -112,24 +122,33 @@ struct irdma_mr {
        struct irdma_pbl iwpbl;
};

struct irdma_srq {
        struct ib_srq ibsrq;
        struct irdma_sc_srq sc_srq __aligned(64);
        struct irdma_dma_mem kmem;
        u64 *srq_wrid_mem;
        refcount_t refcnt;
        spinlock_t lock; /* for poll srq */
        struct irdma_pbl *iwpbl;
        struct irdma_sge *sg_list;
        u16 srq_head;
        u32 srq_num;
        u32 max_wr;
        bool user_mode:1;
};

struct irdma_cq {
        struct ib_cq ibcq;
        struct irdma_sc_cq sc_cq;
        u16 cq_head;
        u16 cq_size;
        u16 cq_num;
        bool user_mode;
        atomic_t armed;
        enum irdma_cmpl_notify last_notify;
        u32 polled_cmpls;
        u32 cq_mem_size;
        struct irdma_dma_mem kmem;
        struct irdma_dma_mem kmem_shadow;
        struct completion free_cq;
        refcount_t refcnt;
        spinlock_t lock; /* for poll cq */
        struct irdma_pbl *iwpbl;
        struct irdma_pbl *iwpbl_shadow;
        struct list_head resize_list;
        struct irdma_cq_poll_info cur_cqe;
        struct list_head cmpl_generated;
@@ -259,6 +278,12 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
        case IRDMA_OP_TYPE_FAST_REG_NSMR:
                entry->opcode = IB_WC_REG_MR;
                break;
        case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
                entry->opcode = IB_WC_COMP_SWAP;
                break;
        case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
                entry->opcode = IB_WC_FETCH_ADD;
                break;
        case IRDMA_OP_TYPE_INV_STAG:
                entry->opcode = IB_WC_LOCAL_INV;
                break;
@@ -267,6 +292,19 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
        }
}

static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
        struct ib_wc *entry)
{
        switch (info->op_type) {
        case IRDMA_OP_TYPE_RDMA_WRITE:
        case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
                entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                break;
        default:
                entry->opcode = IB_WC_RECV;
        }
}

static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
        struct ib_wc *entry, bool send_imm_support)
{
drivers/infiniband/hw/irdma/virtchnl.c (new file, 618 lines)
@@ -0,0 +1,618 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2024 Intel Corporation */

#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "virtchnl.h"
#include "ws.h"
#include "i40iw_hw.h"
#include "ig3rdma_hw.h"

struct vchnl_reg_map_elem {
        u16 reg_id;
        u16 reg_idx;
        bool pg_rel;
};

struct vchnl_regfld_map_elem {
        u16 regfld_id;
        u16 regfld_idx;
};

static struct vchnl_reg_map_elem vchnl_reg_map[] = {
        {IRDMA_VCHNL_REG_ID_CQPTAIL, IRDMA_CQPTAIL, false},
        {IRDMA_VCHNL_REG_ID_CQPDB, IRDMA_CQPDB, false},
        {IRDMA_VCHNL_REG_ID_CCQPSTATUS, IRDMA_CCQPSTATUS, false},
        {IRDMA_VCHNL_REG_ID_CCQPHIGH, IRDMA_CCQPHIGH, false},
        {IRDMA_VCHNL_REG_ID_CCQPLOW, IRDMA_CCQPLOW, false},
        {IRDMA_VCHNL_REG_ID_CQARM, IRDMA_CQARM, false},
        {IRDMA_VCHNL_REG_ID_CQACK, IRDMA_CQACK, false},
        {IRDMA_VCHNL_REG_ID_AEQALLOC, IRDMA_AEQALLOC, false},
        {IRDMA_VCHNL_REG_ID_CQPERRCODES, IRDMA_CQPERRCODES, false},
        {IRDMA_VCHNL_REG_ID_WQEALLOC, IRDMA_WQEALLOC, false},
        {IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET, IRDMA_DB_ADDR_OFFSET, false },
        {IRDMA_VCHNL_REG_ID_DYN_CTL, IRDMA_GLINT_DYN_CTL, false },
        {IRDMA_VCHNL_REG_INV_ID, IRDMA_VCHNL_REG_INV_ID, false }
};

static struct vchnl_regfld_map_elem vchnl_regfld_map[] = {
        {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR, IRDMA_CCQPSTATUS_CCQP_ERR_M},
        {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE, IRDMA_CCQPSTATUS_CCQP_DONE_M},
        {IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID, IRDMA_CQPSQ_STAG_PDID_M},
        {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID, IRDMA_CQPSQ_CQ_CEQID_M},
        {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID, IRDMA_CQPSQ_CQ_CQID_M},
        {IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT, IRDMA_COMMIT_FPM_CQCNT_M},
        {IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID, IRDMA_CQPSQ_UPESD_HMCFNID_M},
        {IRDMA_VCHNL_REGFLD_INV_ID, IRDMA_VCHNL_REGFLD_INV_ID}
};

#define IRDMA_VCHNL_REG_COUNT ARRAY_SIZE(vchnl_reg_map)
#define IRDMA_VCHNL_REGFLD_COUNT ARRAY_SIZE(vchnl_regfld_map)
#define IRDMA_VCHNL_REGFLD_BUF_SIZE \
        (IRDMA_VCHNL_REG_COUNT * sizeof(struct irdma_vchnl_reg_info) + \
         IRDMA_VCHNL_REGFLD_COUNT * sizeof(struct irdma_vchnl_reg_field_info))
#define IRDMA_REGMAP_RESP_BUF_SIZE (IRDMA_VCHNL_RESP_MIN_SIZE + IRDMA_VCHNL_REGFLD_BUF_SIZE)
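A sizing note derived from the two tables: counting the terminator entries, vchnl_reg_map has 13 elements and vchnl_regfld_map has 8, so:

        /*
         * IRDMA_REGMAP_RESP_BUF_SIZE = IRDMA_VCHNL_RESP_MIN_SIZE
         *                            + 13 * sizeof(struct irdma_vchnl_reg_info)
         *                            +  8 * sizeof(struct irdma_vchnl_reg_field_info)
         *
         * The parse loops in irdma_vchnl_req_get_reg_layout() stop at either
         * an invalid-id terminator or the end of this buffer, whichever
         * comes first.
         */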
/**
 * irdma_sc_vchnl_init - Initialize dev virtchannel and get hw_rev
 * @dev: dev structure to update
 * @info: virtchannel info parameters to fill into the dev structure
 */
int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
        struct irdma_vchnl_init_info *info)
{
        dev->vchnl_up = true;
        dev->privileged = info->privileged;
        dev->is_pf = info->is_pf;
        dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev;

        if (!dev->privileged) {
                int ret = irdma_vchnl_req_get_ver(dev, IRDMA_VCHNL_CHNL_VER_MAX,
                        &dev->vchnl_ver);

                ibdev_dbg(to_ibdev(dev),
                        "DEV: Get Channel version ret = %d, version is %u\n",
                        ret, dev->vchnl_ver);

                if (ret)
                        return ret;

                ret = irdma_vchnl_req_get_caps(dev);
                if (ret)
                        return ret;

                dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev;
        }

        return 0;
}

/**
 * irdma_vchnl_req_verify_resp - Verify requested response size
 * @vchnl_req: vchnl message requested
 * @resp_len: response length sent from vchnl peer
 */
static int irdma_vchnl_req_verify_resp(struct irdma_vchnl_req *vchnl_req,
        u16 resp_len)
{
        switch (vchnl_req->vchnl_msg->op_code) {
        case IRDMA_VCHNL_OP_GET_VER:
        case IRDMA_VCHNL_OP_GET_HMC_FCN:
        case IRDMA_VCHNL_OP_PUT_HMC_FCN:
                if (resp_len != vchnl_req->parm_len)
                        return -EBADMSG;
                break;
        case IRDMA_VCHNL_OP_GET_RDMA_CAPS:
                if (resp_len < IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE)
                        return -EBADMSG;
                break;
        case IRDMA_VCHNL_OP_GET_REG_LAYOUT:
        case IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP:
        case IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP:
        case IRDMA_VCHNL_OP_ADD_VPORT:
        case IRDMA_VCHNL_OP_DEL_VPORT:
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static void irdma_free_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req)
{
        kfree(vchnl_req->vchnl_msg);
}

static int irdma_alloc_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req,
        struct irdma_vchnl_req_init_info *info)
{
        struct irdma_vchnl_op_buf *vchnl_msg;

        vchnl_msg = kzalloc(IRDMA_VCHNL_MAX_MSG_SIZE, GFP_KERNEL);

        if (!vchnl_msg)
                return -ENOMEM;

        vchnl_msg->op_ctx = (uintptr_t)vchnl_req;
        vchnl_msg->buf_len = sizeof(*vchnl_msg) + info->req_parm_len;
        if (info->req_parm_len)
                memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len);
        vchnl_msg->op_code = info->op_code;
        vchnl_msg->op_ver = info->op_ver;

        vchnl_req->vchnl_msg = vchnl_msg;
        vchnl_req->parm = info->resp_parm;
        vchnl_req->parm_len = info->resp_parm_len;

        return 0;
}

static int irdma_vchnl_req_send_sync(struct irdma_sc_dev *dev,
        struct irdma_vchnl_req_init_info *info)
{
        u16 resp_len = sizeof(dev->vc_recv_buf);
        struct irdma_vchnl_req vchnl_req = {};
        u16 msg_len;
        u8 *msg;
        int ret;

        ret = irdma_alloc_vchnl_req_msg(&vchnl_req, info);
        if (ret)
                return ret;

        msg_len = vchnl_req.vchnl_msg->buf_len;
        msg = (u8 *)vchnl_req.vchnl_msg;

        mutex_lock(&dev->vchnl_mutex);
        ret = ig3rdma_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf,
                &resp_len);
        dev->vc_recv_len = resp_len;
        if (ret)
                goto exit;

        ret = irdma_vchnl_req_get_resp(dev, &vchnl_req);
exit:
        mutex_unlock(&dev->vchnl_mutex);
        ibdev_dbg(to_ibdev(dev),
                "VIRT: virtual channel send %s caller: %pS ret=%d op=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n",
                !ret ? "SUCCEEDS" : "FAILS", __builtin_return_address(0),
                ret, vchnl_req.vchnl_msg->op_code,
                vchnl_req.vchnl_msg->op_ver, vchnl_req.vchnl_msg->buf_len,
                vchnl_req.parm_len, vchnl_req.resp_len);
        irdma_free_vchnl_req_msg(&vchnl_req);

        return ret;
}

/**
 * irdma_vchnl_req_get_reg_layout - Get Register Layout
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev)
{
        u16 reg_idx, reg_id, tmp_reg_id, regfld_idx, regfld_id, tmp_regfld_id;
        struct irdma_vchnl_reg_field_info *regfld_array = NULL;
        u8 resp_buffer[IRDMA_REGMAP_RESP_BUF_SIZE] = {};
        struct vchnl_regfld_map_elem *regfld_map_array;
        struct irdma_vchnl_req_init_info info = {};
        struct vchnl_reg_map_elem *reg_map_array;
        struct irdma_vchnl_reg_info *reg_array;
        u8 num_bits, shift_cnt;
        u16 buf_len = 0;
        u64 bitmask;
        u32 rindex;
        int ret;

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_GET_REG_LAYOUT;
        info.op_ver = IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0;
        info.resp_parm = resp_buffer;
        info.resp_parm_len = sizeof(resp_buffer);

        ret = irdma_vchnl_req_send_sync(dev, &info);

        if (ret)
                return ret;

        /* parse the response buffer and update reg info */
        /* Parse registers till invalid */
        /* Parse register fields till invalid */
        reg_array = (struct irdma_vchnl_reg_info *)resp_buffer;
        for (rindex = 0; rindex < IRDMA_VCHNL_REG_COUNT; rindex++) {
                buf_len += sizeof(struct irdma_vchnl_reg_info);
                if (buf_len >= sizeof(resp_buffer))
                        return -ENOMEM;

                regfld_array =
                        (struct irdma_vchnl_reg_field_info *)&reg_array[rindex + 1];
                reg_id = reg_array[rindex].reg_id;
                if (reg_id == IRDMA_VCHNL_REG_INV_ID)
                        break;

                reg_id &= ~IRDMA_VCHNL_REG_PAGE_REL;
                if (reg_id >= IRDMA_VCHNL_REG_COUNT)
                        return -EINVAL;

                /* search regmap for register index in hw_regs. */
                reg_map_array = vchnl_reg_map;
                do {
                        tmp_reg_id = reg_map_array->reg_id;
                        if (tmp_reg_id == reg_id)
                                break;

                        reg_map_array++;
                } while (tmp_reg_id != IRDMA_VCHNL_REG_INV_ID);
                if (tmp_reg_id != reg_id)
                        continue;

                reg_idx = reg_map_array->reg_idx;

                /* Page relative, DB Offset do not need bar offset */
                if (reg_idx == IRDMA_DB_ADDR_OFFSET ||
                    (reg_array[rindex].reg_id & IRDMA_VCHNL_REG_PAGE_REL)) {
                        dev->hw_regs[reg_idx] =
                                (u32 __iomem *)(uintptr_t)reg_array[rindex].reg_offset;
                        continue;
                }

                /* Update the local HW struct */
                dev->hw_regs[reg_idx] = ig3rdma_get_reg_addr(dev->hw,
                        reg_array[rindex].reg_offset);
                if (!dev->hw_regs[reg_idx])
                        return -EINVAL;
        }

        if (!regfld_array)
                return -ENOMEM;

        /* set up doorbell variables using mapped DB page */
        dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
        dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
        dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
        dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
        dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];

        for (rindex = 0; rindex < IRDMA_VCHNL_REGFLD_COUNT; rindex++) {
                buf_len += sizeof(struct irdma_vchnl_reg_field_info);
                if ((buf_len - 1) > sizeof(resp_buffer))
                        break;

                if (regfld_array[rindex].fld_id == IRDMA_VCHNL_REGFLD_INV_ID)
                        break;

                regfld_id = regfld_array[rindex].fld_id;
                regfld_map_array = vchnl_regfld_map;
                do {
                        tmp_regfld_id = regfld_map_array->regfld_id;
                        if (tmp_regfld_id == regfld_id)
                                break;

                        regfld_map_array++;
                } while (tmp_regfld_id != IRDMA_VCHNL_REGFLD_INV_ID);

                if (tmp_regfld_id != regfld_id)
                        continue;

                regfld_idx = regfld_map_array->regfld_idx;

                num_bits = regfld_array[rindex].fld_bits;
                shift_cnt = regfld_array[rindex].fld_shift;
                if ((num_bits + shift_cnt > 64) || !num_bits) {
                        ibdev_dbg(to_ibdev(dev),
                                "ERR: Invalid field mask id %d bits %d shift %d",
                                regfld_id, num_bits, shift_cnt);

                        continue;
                }

                bitmask = (1ULL << num_bits) - 1;
                dev->hw_masks[regfld_idx] = bitmask << shift_cnt;
                dev->hw_shifts[regfld_idx] = shift_cnt;
        }

        return 0;
}
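A worked example of the mask math at the bottom of the loop (field values assumed):

        /*
         * A field advertised with fld_bits = 10 and fld_shift = 12 yields
         *
         *      bitmask             = (1ULL << 10) - 1 = 0x3ff
         *      dev->hw_masks[idx]  = 0x3ff << 12      = 0x3ff000
         *      dev->hw_shifts[idx] = 12
         *
         * so a raw register value is later decoded as
         * (val & dev->hw_masks[idx]) >> dev->hw_shifts[idx].
         */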
int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
        u32 qp1_id, struct irdma_qos *qos)
{
        struct irdma_vchnl_resp_vport_info resp_vport = { 0 };
        struct irdma_vchnl_req_vport_info req_vport = { 0 };
        struct irdma_vchnl_req_init_info info = { 0 };
        int ret, i;

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_ADD_VPORT;
        info.op_ver = IRDMA_VCHNL_OP_ADD_VPORT_V0;
        req_vport.vport_id = vport_id;
        req_vport.qp1_id = qp1_id;
        info.req_parm_len = sizeof(req_vport);
        info.req_parm = &req_vport;
        info.resp_parm = &resp_vport;
        info.resp_parm_len = sizeof(resp_vport);

        ret = irdma_vchnl_req_send_sync(dev, &info);
        if (ret)
                return ret;

        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                qos[i].qs_handle = resp_vport.qs_handle[i];
                qos[i].valid = true;
        }

        return 0;
}

int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id, u32 qp1_id)
{
        struct irdma_vchnl_req_init_info info = { 0 };
        struct irdma_vchnl_req_vport_info req_vport = { 0 };

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_DEL_VPORT;
        info.op_ver = IRDMA_VCHNL_OP_DEL_VPORT_V0;
        req_vport.vport_id = vport_id;
        req_vport.qp1_id = qp1_id;
        info.req_parm_len = sizeof(req_vport);
        info.req_parm = &req_vport;

        return irdma_vchnl_req_send_sync(dev, &info);
}

/**
 * irdma_vchnl_req_aeq_vec_map - Map AEQ to vector on this function
 * @dev: RDMA device pointer
 * @v_idx: vector index
 */
int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx)
{
        struct irdma_vchnl_req_init_info info = {};
        struct irdma_vchnl_qvlist_info *qvl;
        struct irdma_vchnl_qv_info *qv;
        u16 qvl_size, num_vectors = 1;
        int ret;

        if (!dev->vchnl_up)
                return -EBUSY;

        qvl_size = struct_size(qvl, qv_info, num_vectors);

        qvl = kzalloc(qvl_size, GFP_KERNEL);
        if (!qvl)
                return -ENOMEM;

        qvl->num_vectors = 1;
        qv = qvl->qv_info;

        qv->ceq_idx = IRDMA_Q_INVALID_IDX;
        qv->v_idx = v_idx;
        qv->itr_idx = IRDMA_IDX_ITR0;

        info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
        info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
        info.req_parm = qvl;
        info.req_parm_len = qvl_size;

        ret = irdma_vchnl_req_send_sync(dev, &info);
        kfree(qvl);

        return ret;
}

/**
 * irdma_vchnl_req_ceq_vec_map - Map CEQ to vector on this function
 * @dev: RDMA device pointer
 * @ceq_id: CEQ index
 * @v_idx: vector index
 */
int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id, u32 v_idx)
{
        struct irdma_vchnl_req_init_info info = {};
        struct irdma_vchnl_qvlist_info *qvl;
        struct irdma_vchnl_qv_info *qv;
        u16 qvl_size, num_vectors = 1;
        int ret;

        if (!dev->vchnl_up)
                return -EBUSY;

        qvl_size = struct_size(qvl, qv_info, num_vectors);

        qvl = kzalloc(qvl_size, GFP_KERNEL);
        if (!qvl)
                return -ENOMEM;

        qvl->num_vectors = num_vectors;
        qv = qvl->qv_info;

        qv->aeq_idx = IRDMA_Q_INVALID_IDX;
        qv->ceq_idx = ceq_id;
        qv->v_idx = v_idx;
        qv->itr_idx = IRDMA_IDX_ITR0;

        info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
        info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
        info.req_parm = qvl;
        info.req_parm_len = qvl_size;

        ret = irdma_vchnl_req_send_sync(dev, &info);
        kfree(qvl);

        return ret;
}

/**
 * irdma_vchnl_req_get_ver - Request Channel version
 * @dev: RDMA device pointer
 * @ver_req: Virtual channel version requested
 * @ver_res: Virtual channel version response
 */
int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req, u32 *ver_res)
{
        struct irdma_vchnl_req_init_info info = {};
        int ret;

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_GET_VER;
        info.op_ver = ver_req;
        info.resp_parm = ver_res;
        info.resp_parm_len = sizeof(*ver_res);

        ret = irdma_vchnl_req_send_sync(dev, &info);
        if (ret)
                return ret;

        if (*ver_res < IRDMA_VCHNL_CHNL_VER_MIN) {
                ibdev_dbg(to_ibdev(dev),
                        "VIRT: %s unsupported vchnl version 0x%0x\n",
                        __func__, *ver_res);
                return -EOPNOTSUPP;
        }

        return 0;
}

/**
 * irdma_vchnl_req_get_hmc_fcn - Request VF HMC Function
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev)
{
        struct irdma_vchnl_req_hmc_info req_hmc = {};
        struct irdma_vchnl_resp_hmc_info resp_hmc = {};
        struct irdma_vchnl_req_init_info info = {};
        int ret;

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_GET_HMC_FCN;
        if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
                info.op_ver = IRDMA_VCHNL_OP_GET_HMC_FCN_V2;
                req_hmc.protocol_used = dev->protocol_used;
                info.req_parm_len = sizeof(req_hmc);
                info.req_parm = &req_hmc;
                info.resp_parm = &resp_hmc;
                info.resp_parm_len = sizeof(resp_hmc);
        }

        ret = irdma_vchnl_req_send_sync(dev, &info);

        if (ret)
                return ret;

        if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
                int i;

                dev->hmc_fn_id = resp_hmc.hmc_func;

                for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                        dev->qos[i].qs_handle = resp_hmc.qs_handle[i];
                        dev->qos[i].valid = true;
                }
        }
        return 0;
}

/**
 * irdma_vchnl_req_put_hmc_fcn - Free VF HMC Function
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev)
{
        struct irdma_vchnl_req_init_info info = {};

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_PUT_HMC_FCN;
        info.op_ver = IRDMA_VCHNL_OP_PUT_HMC_FCN_V0;

        return irdma_vchnl_req_send_sync(dev, &info);
}

/**
 * irdma_vchnl_req_get_caps - Request RDMA capabilities
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev)
{
        struct irdma_vchnl_req_init_info info = {};
        int ret;

        if (!dev->vchnl_up)
                return -EBUSY;

        info.op_code = IRDMA_VCHNL_OP_GET_RDMA_CAPS;
        info.op_ver = IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0;
        info.resp_parm = &dev->vc_caps;
        info.resp_parm_len = sizeof(dev->vc_caps);

        ret = irdma_vchnl_req_send_sync(dev, &info);

        if (ret)
                return ret;

        if (dev->vc_caps.hw_rev > IRDMA_GEN_MAX ||
            dev->vc_caps.hw_rev < IRDMA_GEN_2) {
                ibdev_dbg(to_ibdev(dev),
                        "ERR: %s unsupported hw_rev version 0x%0x\n",
                        __func__, dev->vc_caps.hw_rev);
                return -EOPNOTSUPP;
        }

        return 0;
}

/**
 * irdma_vchnl_req_get_resp - Receive the inbound vchnl response.
 * @dev: Dev pointer
 * @vchnl_req: Vchannel request
 */
int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
        struct irdma_vchnl_req *vchnl_req)
{
        struct irdma_vchnl_resp_buf *vchnl_msg_resp =
                (struct irdma_vchnl_resp_buf *)dev->vc_recv_buf;
        u16 resp_len;
        int ret;

        if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) {
                ibdev_dbg(to_ibdev(dev),
                        "VIRT: error vchnl context value does not match\n");
                return -EBADMSG;
        }

        resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp);
        resp_len = min(resp_len, vchnl_req->parm_len);

        ret = irdma_vchnl_req_verify_resp(vchnl_req, resp_len);
        if (ret)
                return ret;

        ret = (int)vchnl_msg_resp->op_ret;
        if (ret)
                return ret;

        vchnl_req->resp_len = 0;
        if (vchnl_req->parm_len && vchnl_req->parm && resp_len) {
                memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len);
                vchnl_req->resp_len = resp_len;
                ibdev_dbg(to_ibdev(dev), "VIRT: Got response, data size %u\n",
                        resp_len);
        }

        return 0;
}
drivers/infiniband/hw/irdma/virtchnl.h (new file, 176 lines)
@@ -0,0 +1,176 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2024 Intel Corporation */
#ifndef IRDMA_VIRTCHNL_H
#define IRDMA_VIRTCHNL_H

#include "hmc.h"
#include "irdma.h"

/* IRDMA_VCHNL_CHNL_VER_V0 is for legacy hw, no longer supported. */
#define IRDMA_VCHNL_CHNL_VER_V2 2
#define IRDMA_VCHNL_CHNL_VER_MIN IRDMA_VCHNL_CHNL_VER_V2
#define IRDMA_VCHNL_CHNL_VER_MAX IRDMA_VCHNL_CHNL_VER_V2
#define IRDMA_VCHNL_OP_GET_HMC_FCN_V0 0
#define IRDMA_VCHNL_OP_GET_HMC_FCN_V1 1
#define IRDMA_VCHNL_OP_GET_HMC_FCN_V2 2
#define IRDMA_VCHNL_OP_PUT_HMC_FCN_V0 0
#define IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0 0
#define IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0 0
#define IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP_V0 0
#define IRDMA_VCHNL_OP_ADD_VPORT_V0 0
#define IRDMA_VCHNL_OP_DEL_VPORT_V0 0
#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0 0
#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE 1

#define IRDMA_VCHNL_REG_ID_CQPTAIL 0
#define IRDMA_VCHNL_REG_ID_CQPDB 1
#define IRDMA_VCHNL_REG_ID_CCQPSTATUS 2
#define IRDMA_VCHNL_REG_ID_CCQPHIGH 3
#define IRDMA_VCHNL_REG_ID_CCQPLOW 4
#define IRDMA_VCHNL_REG_ID_CQARM 5
#define IRDMA_VCHNL_REG_ID_CQACK 6
#define IRDMA_VCHNL_REG_ID_AEQALLOC 7
#define IRDMA_VCHNL_REG_ID_CQPERRCODES 8
#define IRDMA_VCHNL_REG_ID_WQEALLOC 9
#define IRDMA_VCHNL_REG_ID_IPCONFIG0 10
#define IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET 11
#define IRDMA_VCHNL_REG_ID_DYN_CTL 12
#define IRDMA_VCHNL_REG_ID_AEQITRMASK 13
#define IRDMA_VCHNL_REG_ID_CEQITRMASK 14
#define IRDMA_VCHNL_REG_INV_ID 0xFFFF
#define IRDMA_VCHNL_REG_PAGE_REL 0x8000

#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR 2
#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE 5
#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID 6
#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID 7
#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID 8
#define IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT 9
#define IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID 10
#define IRDMA_VCHNL_REGFLD_INV_ID 0xFFFF

#define IRDMA_VCHNL_RESP_MIN_SIZE (sizeof(struct irdma_vchnl_resp_buf))

enum irdma_vchnl_ops {
        IRDMA_VCHNL_OP_GET_VER = 0,
        IRDMA_VCHNL_OP_GET_HMC_FCN = 1,
        IRDMA_VCHNL_OP_PUT_HMC_FCN = 2,
        IRDMA_VCHNL_OP_GET_REG_LAYOUT = 11,
        IRDMA_VCHNL_OP_GET_RDMA_CAPS = 13,
        IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP = 14,
        IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP = 15,
        IRDMA_VCHNL_OP_ADD_VPORT = 16,
        IRDMA_VCHNL_OP_DEL_VPORT = 17,
};

struct irdma_vchnl_req_hmc_info {
        u8 protocol_used;
        u8 disable_qos;
} __packed;

struct irdma_vchnl_resp_hmc_info {
        u16 hmc_func;
        u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
} __packed;

struct irdma_vchnl_qv_info {
        u32 v_idx;
        u16 ceq_idx;
        u16 aeq_idx;
        u8 itr_idx;
};

struct irdma_vchnl_qvlist_info {
        u32 num_vectors;
        struct irdma_vchnl_qv_info qv_info[];
};

struct irdma_vchnl_req_vport_info {
        u16 vport_id;
        u32 qp1_id;
};

struct irdma_vchnl_resp_vport_info {
        u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
};

struct irdma_vchnl_op_buf {
        u16 op_code;
        u16 op_ver;
        u16 buf_len;
        u16 rsvd;
        u64 op_ctx;
        u8 buf[];
} __packed;

struct irdma_vchnl_resp_buf {
        u64 op_ctx;
        u16 buf_len;
        s16 op_ret;
        u16 rsvd[2];
        u8 buf[];
} __packed;
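The op_ctx field in both wire structs is the correlation token for the whole channel: the sender stamps its request pointer into the outbound message and the peer must echo it back. Condensed from the code in virtchnl.c above:

        /* send side: stamp the waiting request into the outbound message */
        vchnl_msg->op_ctx = (uintptr_t)vchnl_req;

        /* receive side: a response that echoes a different context is rejected */
        if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx)
                return -EBADMSG;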
struct irdma_vchnl_rdma_caps {
        u8 hw_rev;
        u16 cqp_timeout_s;
        u16 cqp_def_timeout_s;
        u16 max_hw_push_len;
} __packed;

struct irdma_vchnl_init_info {
        struct workqueue_struct *vchnl_wq;
        enum irdma_vers hw_rev;
        bool privileged;
        bool is_pf;
};

struct irdma_vchnl_reg_info {
        u32 reg_offset;
        u16 field_cnt;
        u16 reg_id; /* High bit of reg_id: bar or page relative */
};

struct irdma_vchnl_reg_field_info {
        u8 fld_shift;
        u8 fld_bits;
        u16 fld_id;
};

struct irdma_vchnl_req {
        struct irdma_vchnl_op_buf *vchnl_msg;
        void *parm;
        u32 vf_id;
        u16 parm_len;
        u16 resp_len;
};

struct irdma_vchnl_req_init_info {
        void *req_parm;
        void *resp_parm;
        u16 req_parm_len;
        u16 resp_parm_len;
        u16 op_code;
        u16 op_ver;
} __packed;

struct irdma_qos;

int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
        struct irdma_vchnl_init_info *info);
int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req,
        u32 *ver_res);
int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev);
int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev);
int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev);
int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
        struct irdma_vchnl_req *vc_req);
int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev);
int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx);
int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id,
        u32 v_idx);
int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
        u32 qp1_id, struct irdma_qos *qos);
int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id,
        u32 qp1_id);
#endif /* IRDMA_VIRTCHNL_H */

@@ -291,6 +291,32 @@ out:
        return wc_index;
}
void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
{
        struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
        struct ud_sq_shadow_wqe *shadow_wqe;
        struct mana_ib_cq *cq;
        unsigned long flags;

        if (!qp)
                return;

        cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);

        spin_lock_irqsave(&cq->cq_lock, flags);
        while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
                        != NULL) {
                shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
                shadow_queue_advance_next_to_complete(&qp->shadow_sq);
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

        mana_put_qp_ref(qp);
}
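The drain above completes every shadow SQ entry with IB_WC_GENERAL_ERR and fires the completion handler once, so the consumer (in practice the MAD layer) reaps its outstanding GSI sends instead of blocking teardown. A hedged sketch of the consumer-visible effect; the polling loop is illustrative, not part of the patch:

        struct ib_wc wc;

        while (ib_poll_cq(send_cq, 1, &wc) > 0)
                if (wc.status == IB_WC_GENERAL_ERR)
                        pr_debug("GSI send wr_id %llu flushed at shutdown\n",
                                 wc.wr_id);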
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);

@@ -230,6 +230,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
        struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);

        if (mana_ib_is_rnic(dev))
                mana_drain_gsi_sqs(dev);

        ib_unregister_device(&dev->ib_dev);
        dma_pool_destroy(dev->av_pool);
        if (mana_ib_is_rnic(dev)) {

@@ -273,9 +273,8 @@ int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,

        umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
                ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
                return err;
                ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
                return PTR_ERR(umem);
        }

        err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
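This hunk, and several below, move error logging from the %d/PTR_ERR() pattern to %pe, which prints the symbolic errno name of an ERR_PTR-encoded pointer (with CONFIG_SYMBOLIC_ERRNAME=y; it falls back to the numeric value otherwise). A minimal before/after illustration:

        struct ib_umem *umem = ERR_PTR(-ENOMEM);

        pr_err("Failed to get umem, %d\n", (int)PTR_ERR(umem)); /* logs "-12" */
        pr_err("Failed to get umem, %pe\n", umem);              /* logs "-ENOMEM" */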
@@ -43,6 +43,8 @@
 */
#define MANA_AV_BUFFER_SIZE 64

#define MANA_GSI_QPN (1)
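One line of context for the new define: in the IB architecture QP1 is the fixed QPN of the General Service Interface (QP0, the SMI QP, does not exist on RoCE devices), which is why mana_drain_gsi_sqs() can look the QP up directly:

        struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);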
struct mana_ib_adapter_caps {
        u32 max_sq_id;
        u32 max_rq_id;
@@ -410,7 +412,7 @@ struct mana_ib_ah_attr {
        u8 traffic_class;
        u16 src_port;
        u16 dest_port;
        u32 reserved;
        u32 flow_label;
};

struct mana_rnic_set_qp_state_req {
@@ -427,8 +429,15 @@ struct mana_rnic_set_qp_state_req {
        u32 retry_cnt;
        u32 rnr_retry;
        u32 min_rnr_timer;
        u32 reserved;
        u32 rate_limit;
        struct mana_ib_ah_attr ah_attr;
        u64 reserved1;
        u32 qkey;
        u32 qp_access_flags;
        u8 local_ack_timeout;
        u8 max_rd_atomic;
        u16 reserved2;
        u32 reserved3;
}; /* HW Data */

struct mana_rnic_set_qp_state_resp {
@@ -718,6 +727,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
        const struct ib_send_wr **bad_wr);

void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

@@ -138,7 +138,8 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                ibdev_dbg(ibdev,
                        "Failed to get umem for register user-mr, %d\n", err);
                        "Failed to get umem for register user-mr, %pe\n",
                        mr->umem);
                goto err_free;
        }

@@ -220,7 +221,8 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
        umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
        if (IS_ERR(umem_dmabuf)) {
                err = PTR_ERR(umem_dmabuf);
                ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
                ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
                        umem_dmabuf);
                goto err_free;
        }

@@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));

        req.hdr.req.msg_version = GDMA_MESSAGE_V3;
        req.hdr.dev_id = mdev->gdma_dev->dev_id;
        req.adapter = mdev->adapter_handle;
        req.qp_handle = qp->qp_handle;
@@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        req.retry_cnt = attr->retry_cnt;
        req.rnr_retry = attr->rnr_retry;
        req.min_rnr_timer = attr->min_rnr_timer;
        req.rate_limit = attr->rate_limit;
        req.qkey = attr->qkey;
        req.local_ack_timeout = attr->timeout;
        req.qp_access_flags = attr->qp_access_flags;
        req.max_rd_atomic = attr->max_rd_atomic;

        if (attr_mask & IB_QP_AV) {
                ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
                if (!ndev) {
@@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        ibqp->qp_num, attr->dest_qp_num);
                req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
                req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
                req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
        }

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

@@ -1836,9 +1836,9 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
        tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
        if (IS_ERR(tun_qp->qp)) {
                ret = PTR_ERR(tun_qp->qp);
                pr_err("Couldn't create %s QP (%pe)\n",
                        create_tun ? "tunnel" : "special", tun_qp->qp);
                tun_qp->qp = NULL;
                pr_err("Couldn't create %s QP (%d)\n",
                        create_tun ? "tunnel" : "special", ret);
                return ret;
        }

@@ -2017,14 +2017,14 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
                NULL, ctx, &cq_attr);
        if (IS_ERR(ctx->cq)) {
                ret = PTR_ERR(ctx->cq);
                pr_err("Couldn't create tunnel CQ (%d)\n", ret);
                pr_err("Couldn't create tunnel CQ (%pe)\n", ctx->cq);
                goto err_buf;
        }

        ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
        if (IS_ERR(ctx->pd)) {
                ret = PTR_ERR(ctx->pd);
                pr_err("Couldn't create tunnel PD (%d)\n", ret);
                pr_err("Couldn't create tunnel PD (%pe)\n", ctx->pd);
                goto err_cq;
        }

@@ -1652,7 +1652,8 @@ int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
        sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);

        if (IS_ERR(sqp->roce_v2_gsi)) {
                pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
                pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n",
                        sqp->roce_v2_gsi);
                sqp->roce_v2_gsi = NULL;
        } else {
                to_mqp(sqp->roce_v2_gsi)->flags |=

@@ -35,7 +35,7 @@ static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)

        vpd_data = pci_vpd_alloc(pdev, &vpd_size);
        if (IS_ERR(vpd_data)) {
                pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
                pci_err(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
                return PTR_ERR(vpd_data);
        }

@@ -131,8 +131,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
        gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
                IB_POLL_SOFTIRQ);
        if (IS_ERR(gsi->cq)) {
                mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
                        PTR_ERR(gsi->cq));
                mlx5_ib_warn(dev,
                        "unable to create send CQ for GSI QP. error %pe\n",
                        gsi->cq);
                ret = PTR_ERR(gsi->cq);
                goto err_free_wrs;
        }
@@ -147,8 +148,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,

        gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
        if (IS_ERR(gsi->rx_qp)) {
                mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
                        PTR_ERR(gsi->rx_qp));
                mlx5_ib_warn(dev,
                        "unable to create hardware GSI QP. error %pe\n",
                        gsi->rx_qp);
                ret = PTR_ERR(gsi->rx_qp);
                goto err_destroy_cq;
        }
@@ -294,8 +296,9 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)

        qp = create_gsi_ud_qp(gsi);
        if (IS_ERR(qp)) {
                mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
                        PTR_ERR(qp));
                mlx5_ib_warn(dev,
                        "unable to create hardware UD QP for GSI: %pe\n",
                        qp);
                return;
        }

@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -883,6 +884,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
        resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
}
/*
 * Calculate maximum SQ overhead across all QP types.
 * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
 * have smaller overhead than the types calculated below,
 * so they are implicitly included.
 */
static u32 mlx5_ib_calc_max_sq_overhead(void)
{
        u32 max_overhead_xrc, overhead_ud_lso, a, b;

        /* XRC_INI */
        max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
        max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
        a = sizeof(struct mlx5_wqe_atomic_seg) +
                sizeof(struct mlx5_wqe_raddr_seg);
        b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                sizeof(struct mlx5_mkey_seg) +
                MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
        max_overhead_xrc += max(a, b);

        /* UD with LSO */
        overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
        overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
        overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
        overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);

        return max(max_overhead_xrc, overhead_ud_lso);
}

static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
{
        struct mlx5_core_dev *mdev = dev->mdev;
        u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        u32 max_wqe_size;
        /* max QP overhead + 1 SGE, no inline, no special features */
        max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
                sizeof(struct mlx5_wqe_data_seg);

        max_wqe_size = roundup_pow_of_two(max_wqe_size);

        max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);

        return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
}
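A worked example for the new limit, with assumed numbers:

        /*
         * With log_max_qp_sz = 15 the SQ can hold 1 << 15 = 32768 basic
         * blocks of MLX5_SEND_WQE_BB = 64 bytes.  If the worst-case
         * overhead plus one data segment rounds up to 256 bytes per WQE,
         * then
         *
         *      max_qp_wr = (32768 * 64) / 256 = 8192
         *
         * so the advertised max_qp_wr below reflects worst-case WQE size
         * rather than the raw basic-block count.
         */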
static int mlx5_ib_query_device(struct ib_device *ibdev,
|
||||
struct ib_device_attr *props,
|
||||
struct ib_udata *uhw)
|
||||
@@ -1041,7 +1087,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
|
||||
props->max_mr_size = ~0ull;
|
||||
props->page_size_cap = ~(min_page_size - 1);
|
||||
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
|
||||
props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
|
||||
props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
|
||||
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
|
||||
sizeof(struct mlx5_wqe_data_seg);
|
||||
max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
|
||||
@@ -1793,7 +1839,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
|
||||
}
|
||||
|
||||
static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
|
||||
struct mlx5_core_dev *slave)
|
||||
struct mlx5_core_dev *slave,
|
||||
struct mlx5_ib_lb_state *lb_state)
|
||||
{
|
||||
int err;
|
||||
|
||||
@@ -1805,6 +1852,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
lb_state->force_enable = true;
|
||||
return 0;
|
||||
|
||||
out:
|
||||
@@ -1813,16 +1861,22 @@ out:
|
||||
}
|
||||
|
||||
static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
|
||||
struct mlx5_core_dev *slave)
|
||||
struct mlx5_core_dev *slave,
|
||||
struct mlx5_ib_lb_state *lb_state)
|
||||
{
|
||||
mlx5_nic_vport_update_local_lb(slave, false);
|
||||
mlx5_nic_vport_update_local_lb(master, false);
|
||||
|
||||
lb_state->force_enable = false;
|
||||
}
|
||||
|
||||
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (dev->lb.force_enable)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&dev->lb.mutex);
|
||||
if (td)
|
||||
dev->lb.user_td++;
|
||||
@@ -1844,6 +1898,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
|
||||
|
||||
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
|
||||
{
|
||||
if (dev->lb.force_enable)
|
||||
return;
|
||||
|
||||
mutex_lock(&dev->lb.mutex);
|
||||
if (td)
|
||||
dev->lb.user_td--;
|
||||
@@ -2994,14 +3051,16 @@ int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
 	pd = ib_alloc_pd(ibdev, 0);
 	if (IS_ERR(pd)) {
 		ret = PTR_ERR(pd);
-		mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
+		mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
+			    pd);
 		goto unlock;
 	}
 
 	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
-		mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
+		mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
+			    cq);
 		ib_dealloc_pd(pd);
 		goto unlock;
 	}
@@ -3045,7 +3104,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
 	s0 = ib_create_srq(devr->p0, &attr);
 	if (IS_ERR(s0)) {
 		ret = PTR_ERR(s0);
-		mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
+		mlx5_ib_err(dev,
+			    "Couldn't create SRQ 0 for res init, err=%pe\n",
+			    s0);
 		goto unlock;
 	}
 
@@ -3057,7 +3118,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
 	s1 = ib_create_srq(devr->p0, &attr);
 	if (IS_ERR(s1)) {
 		ret = PTR_ERR(s1);
-		mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
+		mlx5_ib_err(dev,
+			    "Couldn't create SRQ 1 for res init, err=%pe\n",
+			    s1);
 		ib_destroy_srq(s0);
 	}
 
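All four message changes above follow the same idiom: printk's %pe specifier takes the ERR_PTR-encoded pointer itself and prints a symbolic errno (e.g. "-ENOMEM") instead of the raw number that PTR_ERR() yields. A sketch of the shape of the pattern in kernel context (not a literal excerpt; %pe exists only in kernel printk, so this cannot be demoed in userspace):

	struct ib_pd *pd;
	int ret;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);			/* numeric errno for the return path */
		pr_err("alloc failed, err=%pe\n", pd);	/* prints symbolically, e.g. -ENOMEM */
		goto unlock;
	}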
@@ -3118,6 +3181,7 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_core_dev *mdev = dev->mdev;
+	bool ro_supp = false;
 	void *mkc;
 	u32 mkey;
 	u32 pdn;
@@ -3146,14 +3210,37 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
 	MLX5_SET(mkc, mkc, length64, 1);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
-	kvfree(in);
 	if (err)
-		goto err;
+		goto err_mkey;
 
 	dev->ddr.mkey = mkey;
 	dev->ddr.pdn = pdn;
+
+	/* create another mkey with RO support */
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
+		MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+		ro_supp = true;
+	}
+
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
+		MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+		ro_supp = true;
+	}
+
+	if (ro_supp) {
+		err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+		/* RO is defined as best effort */
+		if (!err) {
+			dev->ddr.mkey_ro = mkey;
+			dev->ddr.mkey_ro_valid = true;
+		}
+	}
+
+	kvfree(in);
 	return 0;
 
+err_mkey:
+	kvfree(in);
 err:
 	mlx5_core_dealloc_pd(mdev, pdn);
 	return err;
@@ -3162,6 +3249,10 @@ err:
 static void
 mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
 {
+
+	if (dev->ddr.mkey_ro_valid)
+		mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
+
 	mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
 	mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
 }
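The second mkey above is created only when the relaxed-ordering capability bits are present, and its creation is allowed to fail ("best effort"): the RO variant is recorded behind a validity flag, and consumers fall back to the base mkey. A generic sketch of that optional-resource pattern (create_key and all names here are hypothetical stand-ins, not the mlx5 API):

#include <stdbool.h>

struct dd_resources {
	unsigned int mkey;	/* mandatory; init fails if this fails */
	unsigned int mkey_ro;	/* optional relaxed-ordering variant */
	bool mkey_ro_valid;
};

/* hypothetical allocator standing in for mlx5_core_create_mkey() */
extern int create_key(unsigned int *key, bool relaxed_ordering);

static int dd_init(struct dd_resources *ddr, bool hw_supports_ro)
{
	int err = create_key(&ddr->mkey, false);

	if (err)
		return err;			/* the base key is mandatory */

	if (hw_supports_ro && !create_key(&ddr->mkey_ro, true))
		ddr->mkey_ro_valid = true;	/* best effort: failure is ignored */

	return 0;
}

/* pick the RO key only when requested and actually available */
static unsigned int dd_pick_key(const struct dd_resources *ddr, bool want_ro)
{
	return (want_ro && ddr->mkey_ro_valid) ? ddr->mkey_ro : ddr->mkey;
}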
@@ -3523,7 +3614,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
 
 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
 
-	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
 
 	mlx5_core_mp_event_replay(ibdev->mdev,
 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
@@ -3620,7 +3711,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
 				  &key);
 
-	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
 	if (err)
 		goto unbind;
 
@@ -854,6 +854,8 @@ struct mlx5_ib_port_resources {
 struct mlx5_data_direct_resources {
 	u32 pdn;
 	u32 mkey;
+	u32 mkey_ro;
+	u8 mkey_ro_valid :1;
 };
 
 struct mlx5_ib_resources {
@@ -1109,6 +1111,7 @@ struct mlx5_ib_lb_state {
 	u32 user_td;
 	int qps;
 	bool enabled;
+	bool force_enable;
 };
 
 struct mlx5_ib_pf_eq {
@@ -1802,6 +1805,10 @@ mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 
 	bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
 
+	/* In KSM mode HW requires IOVA and mkey's page size to be aligned */
+	if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
+		bitmap &= GENMASK_ULL(__ffs64(iova), 0);
+
 	return ib_umem_find_best_pgsz(umem, bitmap, iova);
 }
 
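The new KSM constraint above shrinks the candidate page-size bitmap so the chosen page size also divides the IOVA: every bit above the IOVA's lowest set bit is cleared. A standalone sketch of that mask arithmetic, with userspace stand-ins for GENMASK_ULL and __ffs64 (the example values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* bits l..h set, like the kernel's GENMASK_ULL(h, l) */
static uint64_t genmask_ull(unsigned int h, unsigned int l)
{
	return (~0ULL << l) & (~0ULL >> (63 - h));
}

int main(void)
{
	uint64_t bitmap = genmask_ull(30, 12);	/* 4 KiB .. 1 GiB page sizes */
	uint64_t iova = 0x200000;		/* example 2 MiB-aligned IOVA */

	/* __ffs64 analogue: keep page sizes no larger than the IOVA alignment */
	bitmap &= genmask_ull(__builtin_ctzll(iova), 0);

	printf("largest usable page size: %llu\n",	/* prints 2097152 (2 MiB) */
	       (unsigned long long)(1ULL << (63 - __builtin_clzll(bitmap))));
	return 0;
}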
@@ -1652,8 +1652,7 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
 					 fd, access_flags);
 
 	if (IS_ERR(umem_dmabuf)) {
-		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
-			    PTR_ERR(umem_dmabuf));
+		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%pe)\n", umem_dmabuf);
 		return ERR_CAST(umem_dmabuf);
 	}
 
@@ -1717,11 +1716,11 @@ reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
 		goto end;
 	}
 
-	/* The device's 'data direct mkey' was created without RO flags to
-	 * simplify things and allow for a single mkey per device.
-	 * Since RO is not a must, mask it out accordingly.
+	/* If no device's 'data direct mkey' with RO flags exists
+	 * mask it out accordingly.
 	 */
-	access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+	if (!dev->ddr.mkey_ro_valid)
+		access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
 	crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
 					offset, length, virt_addr, fd,
 					access_flags, MLX5_MKC_ACCESS_MODE_KSM,
@@ -761,7 +761,11 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
 
 		if (dd) {
 			cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
-			cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+			if (mr->access_flags & IB_ACCESS_RELAXED_ORDERING &&
+			    dev->ddr.mkey_ro_valid)
+				cur_ksm->key = cpu_to_be32(dev->ddr.mkey_ro);
+			else
+				cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
 			if (mr->umem->is_dmabuf &&
 			    (flags & MLX5_IB_UPD_XLT_ZAP)) {
 				cur_ksm->va = 0;
@@ -492,7 +492,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
-	u32 ret;
+	int ret;
 	u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
 		RVT_AIP_QPN_MAX : RVT_QPN_MAX;
 
@@ -510,7 +510,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 		else
 			qpt->flags |= n;
 		spin_unlock(&qpt->lock);
-		goto bail;
+
+		return ret;
 	}
 
 	qpn = qpt->last + qpt->incr;
@@ -530,7 +531,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 		if (!test_and_set_bit(offset, map->page)) {
 			qpt->last = qpn;
 			ret = qpn;
-			goto bail;
+
+			return ret;
 		}
 		offset += qpt->incr;
 		/*
@@ -565,10 +567,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 		qpn = mk_qpn(qpt, map, offset);
 	}
 
-	ret = -ENOMEM;
-
-bail:
-	return ret;
+	return -ENOMEM;
 }
 
 /**
 
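The rdmavt hunks above drop the accumulate-then-goto-bail pattern in favor of returning as soon as a QPN bit is claimed. A compact userspace sketch of that claim loop, with a GCC atomic standing in for the kernel's test_and_set_bit() and a single flat bitmap instead of rvt's paged map:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define QPN_MAX 4096

static uint64_t qpn_map[QPN_MAX / 64];

/* atomically set bit n; return true if it was already set */
static bool test_and_set_bit64(unsigned int n, uint64_t *map)
{
	uint64_t mask = 1ULL << (n % 64);

	return __atomic_fetch_or(&map[n / 64], mask, __ATOMIC_SEQ_CST) & mask;
}

static int alloc_qpn_sketch(unsigned int first)
{
	for (unsigned int qpn = first; qpn < QPN_MAX; qpn++) {
		if (!test_and_set_bit64(qpn, qpn_map))
			return qpn;	/* claimed: return directly, no bail label */
	}
	return -ENOMEM;
}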
Some files were not shown because too many files have changed in this diff.