Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Lighter that normal, but the now usual collection of driver fixes and
  small improvements:

   - Small fixes and minor improvements to cxgb4, bnxt_re, rxe, srp,
     and efa

   - Update mlx4 to use the new umem APIs, avoiding direct use of
     scatterlist

   - Support RoCEv2 in erdma

   - Remove various uncalled functions, constify bin_attribute

   - Provide core infrastructure to catch netdev events and route them
     to drivers, consolidating duplicated driver code

   - Fix rare race condition crashes in mlx5 ODP flows"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (63 commits)
  RDMA/mlx5: Fix implicit ODP use after free
  RDMA/mlx5: Fix a race for an ODP MR which leads to CQE with error
  RDMA/qib: Constify 'struct bin_attribute'
  RDMA/hfi1: Constify 'struct bin_attribute'
  RDMA/rxe: Fix the warning "__rxe_cleanup+0x12c/0x170 [rdma_rxe]"
  RDMA/cxgb4: Notify rdma stack for IB_EVENT_QP_LAST_WQE_REACHED event
  RDMA/bnxt_re: Allocate dev_attr information dynamically
  RDMA/bnxt_re: Pass the context for ulp_irq_stop
  RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event
  RDMA/bnxt_re: Query firmware defaults of CC params during probe
  RDMA/bnxt_re: Add Async event handling support
  bnxt_en: Add ULP call to notify async events
  RDMA/mlx5: Fix indirect mkey ODP page count
  MAINTAINERS: Update the bnxt_re maintainers
  RDMA/hns: Clean up the legacy CONFIG_INFINIBAND_HNS
  RDMA/rtrs: Add missing deinit() call
  RDMA/efa: Align interrupt related fields to same type
  RDMA/bnxt_re: Fix to drop reference to the mmap entry in case of error
  RDMA/mlx5: Fix link status down event for MPV
  RDMA/erdma: Support create_ah/destroy_ah in non-sleepable contexts
  ...
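The "core infrastructure to catch netdev events and route them to drivers" item above boils down to the RDMA core owning a single netdevice notifier: for NETDEV_UP/DOWN/CHANGE it calls a driver's new report_port_event op if one is set, and otherwise dispatches the port state change itself via ib_dispatch_port_state_event(). A minimal sketch of how a driver might opt in is below (illustrative only: the demo_* names are made up, and it assumes the op and helper are exposed through rdma/ib_verbs.h with the (ibdev, ndev, event) shape used in the core hunk further down).

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

/* Invoked by the core notifier for NETDEV_UP/DOWN/CHANGE on a port netdev. */
static void demo_report_port_event(struct ib_device *ibdev,
                                   struct net_device *ndev,
                                   unsigned long event)
{
        /* Do any driver-private bookkeeping for the event here, then let
         * the core translate carrier state into PORT_ACTIVE/PORT_ERR.
         */
        ib_dispatch_port_state_event(ibdev, ndev);
}

static const struct ib_device_ops demo_dev_ops = {
        .report_port_event = demo_report_port_event,
};

Drivers that leave the op unset simply get the core's default dispatch behaviour, which is what lets the hand-rolled per-driver notifiers (such as the bnxt_re one removed further down) be deleted.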
Committed by Linus Torvalds on 2025-01-24 12:21:28 -08:00
68 changed files with 2014 additions and 1259 deletions


@@ -4800,6 +4800,7 @@ F: drivers/scsi/mpi3mr/
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
M: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.broadcom.com


@@ -1127,41 +1127,6 @@ err:
}
EXPORT_SYMBOL(ib_find_cached_pkey);
int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
u16 pkey, u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
int i;
int ret = -ENOENT;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
read_lock_irqsave(&device->cache_lock, flags);
cache = device->port_data[port_num].cache.pkey;
if (!cache) {
ret = -EINVAL;
goto err;
}
*index = -1;
for (i = 0; i < cache->table_len; ++i)
if (cache->table[i] == pkey) {
*index = i;
ret = 0;
break;
}
err:
read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
unsigned long flags;


@@ -209,23 +209,6 @@ static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
printk("%s(NULL ib_device): %pV", level, vaf);
}
void ibdev_printk(const char *level, const struct ib_device *ibdev,
const char *format, ...)
{
struct va_format vaf;
va_list args;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
__ibdev_printk(level, ibdev, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ibdev_printk);
#define define_ibdev_printk_level(func, level) \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{ \
@@ -2295,6 +2278,33 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
}
EXPORT_SYMBOL(ib_device_get_netdev);
/**
* ib_query_netdev_port - Query the port number of a net_device
* associated with an ibdev
* @ibdev: IB device
* @ndev: Network device
* @port: IB port the net_device is connected to
*/
int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
u32 *port)
{
struct net_device *ib_ndev;
u32 port_num;
rdma_for_each_port(ibdev, port_num) {
ib_ndev = ib_device_get_netdev(ibdev, port_num);
if (ndev == ib_ndev) {
*port = port_num;
dev_put(ib_ndev);
return 0;
}
dev_put(ib_ndev);
}
return -ENOENT;
}
EXPORT_SYMBOL(ib_query_netdev_port);
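/*
 * Illustrative usage only (demo_log_port() is not part of this patch): map a
 * notifier's net_device back to the IB port number it backs before acting on
 * an event for it.
 */
static void demo_log_port(struct ib_device *ibdev, struct net_device *ndev)
{
        u32 port;

        if (ib_query_netdev_port(ibdev, ndev, &port))
                return; /* ndev does not back any port of ibdev */
        ibdev_dbg(ibdev, "%s backs IB port %u\n", netdev_name(ndev), port);
}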
/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
* @ndev: netdev to locate
@@ -2761,6 +2771,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
SET_DEVICE_OP(dev_ops, report_port_event);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
@@ -2854,11 +2865,62 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
},
};
void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
{
enum ib_port_state curr_state;
struct ib_event ibevent = {};
u32 port;
if (ib_query_netdev_port(ibdev, ndev, &port))
return;
curr_state = ib_get_curr_port_state(ndev);
write_lock_irq(&ibdev->cache_lock);
if (ibdev->port_data[port].cache.last_port_state == curr_state) {
write_unlock_irq(&ibdev->cache_lock);
return;
}
ibdev->port_data[port].cache.last_port_state = curr_state;
write_unlock_irq(&ibdev->cache_lock);
ibevent.event = (curr_state == IB_PORT_DOWN) ?
IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
ibevent.device = ibdev;
ibevent.element.port_num = port;
ib_dispatch_event(&ibevent);
}
EXPORT_SYMBOL(ib_dispatch_port_state_event);
static void handle_port_event(struct net_device *ndev, unsigned long event)
{
struct ib_device *ibdev;
/* Currently, link events in bonding scenarios are still
* reported by drivers that support bonding.
*/
if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
return;
ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
if (!ibdev)
return;
if (ibdev->ops.report_port_event) {
ibdev->ops.report_port_event(ibdev, ndev, event);
goto put_ibdev;
}
ib_dispatch_port_state_event(ibdev, ndev);
put_ibdev:
ib_device_put(ibdev);
};
static int ib_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct net_device *ib_ndev;
struct ib_device *ibdev;
u32 port;
@@ -2868,15 +2930,21 @@ static int ib_netdevice_event(struct notifier_block *this,
if (!ibdev)
return NOTIFY_DONE;
rdma_for_each_port(ibdev, port) {
ib_ndev = ib_device_get_netdev(ibdev, port);
if (ndev == ib_ndev)
rdma_nl_notify_event(ibdev, port,
RDMA_NETDEV_RENAME_EVENT);
dev_put(ib_ndev);
if (ib_query_netdev_port(ibdev, ndev, &port)) {
ib_device_put(ibdev);
break;
}
rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
ib_device_put(ibdev);
break;
case NETDEV_UP:
case NETDEV_CHANGE:
case NETDEV_DOWN:
handle_port_event(ndev, event);
break;
default:
break;
}


@@ -462,86 +462,3 @@ int ib_ud_header_pack(struct ib_ud_header *header,
return len;
}
EXPORT_SYMBOL(ib_ud_header_pack);
/**
* ib_ud_header_unpack - Unpack UD header struct from wire format
* @header:UD header struct
* @buf:Buffer to pack into
*
* ib_ud_header_pack() unpacks the UD header structure @header from wire
* format in the buffer @buf.
*/
int ib_ud_header_unpack(void *buf,
struct ib_ud_header *header)
{
ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
buf, &header->lrh);
buf += IB_LRH_BYTES;
if (header->lrh.link_version != 0) {
pr_warn("Invalid LRH.link_version %u\n",
header->lrh.link_version);
return -EINVAL;
}
switch (header->lrh.link_next_header) {
case IB_LNH_IBA_LOCAL:
header->grh_present = 0;
break;
case IB_LNH_IBA_GLOBAL:
header->grh_present = 1;
ib_unpack(grh_table, ARRAY_SIZE(grh_table),
buf, &header->grh);
buf += IB_GRH_BYTES;
if (header->grh.ip_version != 6) {
pr_warn("Invalid GRH.ip_version %u\n",
header->grh.ip_version);
return -EINVAL;
}
if (header->grh.next_header != 0x1b) {
pr_warn("Invalid GRH.next_header 0x%02x\n",
header->grh.next_header);
return -EINVAL;
}
break;
default:
pr_warn("Invalid LRH.link_next_header %u\n",
header->lrh.link_next_header);
return -EINVAL;
}
ib_unpack(bth_table, ARRAY_SIZE(bth_table),
buf, &header->bth);
buf += IB_BTH_BYTES;
switch (header->bth.opcode) {
case IB_OPCODE_UD_SEND_ONLY:
header->immediate_present = 0;
break;
case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
header->immediate_present = 1;
break;
default:
pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
return -EINVAL;
}
if (header->bth.transport_header_version != 0) {
pr_warn("Invalid BTH.transport_header_version %u\n",
header->bth.transport_header_version);
return -EINVAL;
}
ib_unpack(deth_table, ARRAY_SIZE(deth_table),
buf, &header->deth);
buf += IB_DETH_BYTES;
if (header->immediate_present)
memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
return 0;
}
EXPORT_SYMBOL(ib_ud_header_unpack);


@@ -171,45 +171,3 @@ void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
__ib_copy_path_rec_to_user(dst, src);
}
EXPORT_SYMBOL(ib_copy_path_rec_to_user);
void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
struct ib_user_path_rec *src)
{
u32 slid, dlid;
memset(dst, 0, sizeof(*dst));
if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
(ib_is_opa_gid((union ib_gid *)src->dgid))) {
dst->rec_type = SA_PATH_REC_TYPE_OPA;
slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
} else {
dst->rec_type = SA_PATH_REC_TYPE_IB;
slid = ntohs(src->slid);
dlid = ntohs(src->dlid);
}
memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
sa_path_set_dlid(dst, dlid);
sa_path_set_slid(dst, slid);
sa_path_set_raw_traffic(dst, src->raw_traffic);
dst->flow_label = src->flow_label;
dst->hop_limit = src->hop_limit;
dst->traffic_class = src->traffic_class;
dst->reversible = src->reversible;
dst->numb_path = src->numb_path;
dst->pkey = src->pkey;
dst->sl = src->sl;
dst->mtu_selector = src->mtu_selector;
dst->mtu = src->mtu;
dst->rate_selector = src->rate_selector;
dst->rate = src->rate;
dst->packet_life_time = src->packet_life_time;
dst->preference = src->preference;
dst->packet_life_time_selector = src->packet_life_time_selector;
/* TODO: No need to set this */
sa_path_set_dmac_zero(dst);
}
EXPORT_SYMBOL(ib_copy_path_rec_from_user);


@@ -11,7 +11,7 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/


@@ -204,7 +204,7 @@ struct bnxt_re_dev {
struct bnxt_re_nq_record *nqr;
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
@@ -229,6 +229,9 @@ struct bnxt_re_dev {
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
struct dentry *dbg_root;
struct dentry *qp_debugfs;
unsigned long event_bitmap;
struct bnxt_qplib_cc_param cc_param;
struct workqueue_struct *dcb_wq;
};
#define to_bnxt_re_dev(ptr, member) \


@@ -37,18 +37,9 @@
*
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -357,7 +348,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
!rdev->is_virtfn) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {


@@ -52,8 +52,6 @@
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -161,7 +159,7 @@ static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
struct bnxt_qplib_mrw *qplib_mr)
{
if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
}
@@ -186,7 +184,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
@@ -275,7 +273,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -333,8 +331,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
@@ -585,7 +583,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
@@ -1057,7 +1055,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
@@ -1277,7 +1275,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
rq = &qplqp->rq;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
if (init_attr->srq) {
struct bnxt_re_srq *srq;
@@ -1314,7 +1312,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1340,7 +1338,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
entries = init_attr->cap.max_send_wr;
@@ -1393,7 +1391,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
@@ -1442,7 +1440,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
@@ -1612,7 +1610,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
ib_pd = ib_qp->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
@@ -1840,7 +1838,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
ib_pd = ib_srq->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
@@ -2044,7 +2042,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
unsigned int flags;
@@ -3091,7 +3089,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata = &attrs->driver_udata;
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_qplib_chip_ctx *cctx;
int cqe = attr->cqe;
int rc, entries;
@@ -3226,7 +3224,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
dev_attr = &rdev->dev_attr;
dev_attr = rdev->dev_attr;
if (!ibcq->uobject) {
ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
return -EOPNOTSUPP;
@@ -4199,7 +4197,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
@@ -4291,7 +4289,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_re_ucontext *uctx =
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
struct bnxt_re_uctx_req ureq = {};
@@ -4467,9 +4465,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
return -EFAULT;
ret = vm_insert_page(vma, vma->vm_start,
virt_to_page((void *)bnxt_entry->mem_offset));
ret = -EFAULT;
else
ret = vm_insert_page(vma, vma->vm_start,
virt_to_page((void *)bnxt_entry->mem_offset));
break;
default:
ret = -EINVAL;


@@ -79,17 +79,12 @@ MODULE_LICENSE("Dual BSD/GPL");
/* globals */
static DEFINE_MUTEX(bnxt_re_mutex);
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static int bnxt_re_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr);
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
u8 port_num, enum ib_event_type event);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -153,6 +148,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
if (!rdev->chip_ctx)
return;
kfree(rdev->dev_attr);
rdev->dev_attr = NULL;
chip_ctx = rdev->chip_ctx;
rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
@@ -166,7 +165,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
int rc;
int rc = -ENOMEM;
en_dev = rdev->en_dev;
@@ -182,7 +181,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
rdev->qplib_res.dattr = &rdev->dev_attr;
rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
if (!rdev->dev_attr)
goto free_chip_ctx;
rdev->qplib_res.dattr = rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
@@ -190,16 +192,20 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
if (rc) {
kfree(rdev->chip_ctx);
rdev->chip_ctx = NULL;
return rc;
}
if (rc)
goto free_dev_attr;
if (bnxt_qplib_determine_atomics(en_dev->pdev))
ibdev_info(&rdev->ibdev,
"platform doesn't support global atomics.");
return 0;
free_dev_attr:
kfree(rdev->dev_attr);
rdev->dev_attr = NULL;
free_chip_ctx:
kfree(rdev->chip_ctx);
rdev->chip_ctx = NULL;
return rc;
}
/* SR-IOV helper functions */
@@ -221,7 +227,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
struct bnxt_qplib_ctx *ctx;
int i;
attr = &rdev->dev_attr;
attr = rdev->dev_attr;
ctx = &rdev->qplib_ctx;
ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
@@ -235,7 +241,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
rdev->dev_attr.tqm_alloc_reqs[i];
rdev->dev_attr->tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
@@ -302,17 +308,123 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
&rdev->qplib_ctx);
}
static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
struct bnxt_re_dcb_work {
struct work_struct work;
struct bnxt_re_dev *rdev;
struct hwrm_async_event_cmpl cmpl;
};
rdev = en_info->rdev;
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
{
return qp->ib_qp.qp_type == IB_QPT_GSI;
}
static void bnxt_re_stop_irq(void *handle)
static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
{
struct bnxt_re_qp *qp;
mutex_lock(&rdev->qp_lock);
list_for_each_entry(qp, &rdev->qp_list, list) {
if (bnxt_re_is_qp1_qp(qp)) {
mutex_unlock(&rdev->qp_lock);
return qp;
}
}
mutex_unlock(&rdev->qp_lock);
return NULL;
}
static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
{
struct bnxt_re_qp *qp;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
return 0;
qp = bnxt_re_get_qp1_qp(rdev);
if (!qp)
return 0;
qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
}
static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
{
rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
}
static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
{
if (!rdev->dcb_wq)
return;
destroy_workqueue(rdev->dcb_wq);
}
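/* Worker for the DCB_CONFIG_CHANGE async event: re-read the CC parameters
 * from firmware and, if the ToS/DSCP value changed, push it to QP1 (GSI).
 */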
static void bnxt_re_dcb_wq_task(struct work_struct *work)
{
struct bnxt_re_dcb_work *dcb_work =
container_of(work, struct bnxt_re_dcb_work, work);
struct bnxt_re_dev *rdev = dcb_work->rdev;
struct bnxt_qplib_cc_param *cc_param;
int rc;
if (!rdev)
goto free_dcb;
cc_param = &rdev->cc_param;
rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
if (rc) {
ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
goto free_dcb;
}
if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
cc_param->qp1_tos_dscp = cc_param->tos_dscp;
rc = bnxt_re_update_qp1_tos_dscp(rdev);
if (rc) {
ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d",
__func__, rc);
goto free_dcb;
}
}
free_dcb:
kfree(dcb_work);
}
static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
struct bnxt_re_dcb_work *dcb_work;
u32 data1, data2;
u16 event_id;
event_id = le16_to_cpu(cmpl->event_id);
data1 = le32_to_cpu(cmpl->event_data1);
data2 = le32_to_cpu(cmpl->event_data2);
ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
event_id, data1, data2);
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
if (!dcb_work)
break;
dcb_work->rdev = rdev;
memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
queue_work(rdev->dcb_wq, &dcb_work->work);
break;
default:
break;
}
}
static void bnxt_re_stop_irq(void *handle, bool reset)
{
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_qplib_rcfw *rcfw;
@@ -323,6 +435,14 @@ static void bnxt_re_stop_irq(void *handle)
rdev = en_info->rdev;
rcfw = &rdev->rcfw;
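/* When IRQs are stopped as part of a firmware reset, fail outstanding
 * firmware commands, wake their waiters and report DEVICE_FATAL before the
 * NQ and CREQ IRQs are torn down below.
 */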
if (reset) {
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
wake_up_all(&rdev->rcfw.cmdq.waitq);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
IB_EVENT_DEVICE_FATAL);
}
for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
nq = &rdev->nqr->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
@@ -378,6 +498,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
.ulp_async_notifier = bnxt_re_async_notifier,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
@@ -839,17 +960,6 @@ static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
}
/* Device */
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
struct ib_device *ibdev =
ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
if (!ibdev)
return NULL;
return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1627,12 +1737,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto fail;
rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
rdev->netdev, &rdev->dev_attr);
rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
if (rc)
goto fail;
@@ -1807,6 +1916,26 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
{
if (rdev->is_virtfn)
return;
memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
}
static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
{
if (rdev->is_virtfn)
return;
rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
}
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -1886,6 +2015,9 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_rem_pdev(rdev);
bnxt_re_net_unregister_async_event(rdev);
bnxt_re_uninit_dcb_wq(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -2032,7 +2164,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rdev->pacing.dbr_pacing = false;
}
}
rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto disable_rcfw;
@@ -2081,6 +2213,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
/* Query f/w defaults of CC params */
rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
if (rc)
ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
rc = bnxt_re_setup_qos(rdev);
if (rc)
ibdev_info(&rdev->ibdev,
@@ -2099,6 +2236,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_add_pdev(rdev);
bnxt_re_init_dcb_wq(rdev);
bnxt_re_net_register_async_event(rdev);
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
@@ -2117,6 +2257,30 @@ fail:
return rc;
}
static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
struct bnxt_qplib_cc_param cc_param = {};
/* Do not enable congestion control on VFs */
if (rdev->is_virtfn)
return;
/* Currently enabling only for GenP5 adapters */
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
return;
if (enable) {
cc_param.enable = 1;
cc_param.tos_ecn = 1;
}
cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
}
static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
struct bnxt_re_en_dev_info *en_info,
struct auxiliary_device *adev)
@@ -2163,20 +2327,10 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
goto re_dev_uninit;
}
rdev->nb.notifier_call = bnxt_re_netdev_event;
rc = register_netdevice_notifier(&rdev->nb);
if (rc) {
rdev->nb.notifier_call = NULL;
pr_err("%s: Cannot register to netdevice_notifier",
ROCE_DRV_MODULE_NAME);
goto re_dev_unreg;
}
bnxt_re_setup_cc(rdev, true);
return 0;
re_dev_unreg:
ib_unregister_device(&rdev->ibdev);
re_dev_uninit:
bnxt_re_update_en_info_rdev(NULL, en_info, adev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -2186,79 +2340,6 @@ exit:
return rc;
}
static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
struct bnxt_qplib_cc_param cc_param = {};
/* Do not enable congestion control on VFs */
if (rdev->is_virtfn)
return;
/* Currently enabling only for GenP5 adapters */
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
return;
if (enable) {
cc_param.enable = 1;
cc_param.tos_ecn = 1;
}
cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
}
/*
* "Notifier chain callback can be invoked for the same chain from
* different CPUs at the same time".
*
* For cases when the netdev is already present, our call to the
* register_netdevice_notifier() will actually get the rtnl_lock()
* before sending NETDEV_REGISTER and (if up) NETDEV_UP
* events.
*
* But for cases when the netdev is not already present, the notifier
* chain is subjected to be invoked from different CPUs simultaneously.
*
* This is protected by the netdev_mutex.
*/
static int bnxt_re_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
struct bnxt_re_dev *rdev;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
real_dev = netdev;
if (real_dev != netdev)
goto exit;
rdev = bnxt_re_from_netdev(real_dev);
if (!rdev)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
netif_carrier_ok(real_dev) ?
IB_EVENT_PORT_ACTIVE :
IB_EVENT_PORT_ERR);
break;
default:
break;
}
ib_device_put(&rdev->ibdev);
exit:
return NOTIFY_DONE;
}
#define BNXT_ADEV_NAME "bnxt_en"
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
@@ -2316,13 +2397,9 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
if (rc)
goto err;
mutex_unlock(&bnxt_re_mutex);
return 0;
kfree(en_info);
err:
mutex_unlock(&bnxt_re_mutex);
kfree(en_info);
return rc;
}
@@ -2375,6 +2452,16 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
return 0;
}
static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
struct bnxt_re_dev *rdev;
rdev = en_info->rdev;
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
}
static const struct auxiliary_device_id bnxt_re_id_table[] = {
{ .name = BNXT_ADEV_NAME ".rdma", },
{},


@@ -343,6 +343,7 @@ struct bnxt_qplib_qp {
u32 msn;
u32 msn_tbl_sz;
bool is_host_msn_tbl;
u8 tos_dscp;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)


@@ -876,14 +876,13 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr)
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
struct bnxt_qplib_dev_attr *dev_attr;
int rc;
res->pdev = pdev;
res->netdev = netdev;
dev_attr = res->dattr;
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)


@@ -424,9 +424,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,


@@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
fw_ver[3] = resp.fw_rsvd;
}
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr)
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
@@ -1022,3 +1022,116 @@ free_mem:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
{
cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
cc_ext->init_cp = le16_to_cpu(sb->init_cp);
cc_ext->tr_update_mode = sb->tr_update_mode;
cc_ext->tr_update_cyls = sb->tr_update_cycles;
cc_ext->fr_rtt = sb->fr_num_rtts;
cc_ext->ai_rate_incr = sb->ai_rate_increase;
cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
cc_ext->bw_avg_weight = sb->bw_avg_weight;
cc_ext->cr_factor = sb->actual_cr_factor;
cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
cc_ext->cp_bias_en = sb->cp_bias_en;
cc_ext->cp_bias = sb->cp_bias;
cc_ext->cnp_ecn = sb->cnp_ecn;
cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
cc_ext->cr_width = sb->cr_width;
cc_ext->min_quota = sb->quota_period_min;
cc_ext->max_quota = sb->quota_period_max;
cc_ext->abs_max_quota = sb->quota_period_abs_max;
cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
cc_ext->cr_prob_fac = sb->cr_prob_factor;
cc_ext->tr_prob_fac = sb->tr_prob_factor;
cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
cc_ext->red_div = sb->red_div;
cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
cc_ext->low_rate_en = sb->use_rate_table;
cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
}
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param)
{
struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_query_roce_cc_resp resp = {};
struct creq_query_roce_cc_resp_sb *sb;
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_query_roce_cc req = {};
struct bnxt_qplib_rcfw_sbuf sbuf;
size_t resp_size;
int rc;
/* Query the parameters from chip */
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
sizeof(req));
if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
resp_size = sizeof(*ext_sb);
else
resp_size = sizeof(*sb);
sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
&sbuf.dma_addr, GFP_KERNEL);
if (!sbuf.sb)
return -ENOMEM;
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
if (rc)
goto out;
ext_sb = sbuf.sb;
sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
(struct creq_query_roce_cc_resp_sb *)ext_sb;
cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
cc_param->alt_tos_dscp = sb->alt_tos_dscp;
cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
cc_param->g = sb->g;
cc_param->nph_per_state = sb->num_phases_per_state;
cc_param->init_cr = le16_to_cpu(sb->init_cr);
cc_param->init_tr = le16_to_cpu(sb->init_tr);
cc_param->cc_mode = sb->cc_mode;
cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
cc_param->rtt = le16_to_cpu(sb->rtt);
cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
cc_param->time_pph = sb->time_per_phase;
cc_param->pkts_pph = sb->pkts_per_phase;
if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
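/* Gen P5/P7 report the inactivity threshold split across two fields;
 * fold the 6 extended high bits in above bit 16.
 */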
cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
}
out:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}


@@ -296,6 +296,7 @@ struct bnxt_qplib_cc_param_ext {
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
u8 qp1_tos_dscp;
u16 alt_tos_dscp;
u8 cc_mode;
u8 enable;
@@ -325,8 +326,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
@@ -355,6 +355,8 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256


@@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* The math here assumes sizeof cpl_pass_accept_req >= sizeof
* cpl_rx_pkt.
*/
skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
skb = alloc_skb(size_add(gl->tot_len,
sizeof(struct cpl_pass_accept_req) +
sizeof(struct rss_header)) - pktshift,
GFP_ATOMIC);
if (unlikely(!skb))
return NULL;


@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int count;
int rq_flushed = 0, sq_flushed;
unsigned long flag;
struct ib_event ev;
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
if (schp != rchp)
spin_lock(&schp->lock);
spin_lock(&qhp->lock);
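/* For an SRQ-attached QP that has moved to ERROR, tell the consumer via
 * IB_EVENT_QP_LAST_WQE_REACHED that no more WQEs will be consumed from the
 * SRQ, so ULPs waiting on that event can finish tearing the QP down.
 */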
if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
qhp->ibqp.event_handler) {
ev.device = qhp->ibqp.device;
ev.element.qp = &qhp->ibqp;
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
}
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
* Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -57,15 +57,15 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
unsigned int num_irq_vectors;
int admin_msix_vector_idx;
u32 num_irq_vectors;
u32 admin_msix_vector_idx;
struct efa_irq admin_irq;
struct efa_stats stats;
/* Array of completion EQs */
struct efa_eq *eqs;
unsigned int neqs;
u32 neqs;
/* Only stores CQs with interrupts enabled */
struct xarray cqs_xa;


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
* Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -65,7 +65,7 @@ struct efa_com_admin_queue {
u16 depth;
struct efa_com_admin_cq cq;
struct efa_com_admin_sq sq;
u16 msix_vector_idx;
u32 msix_vector_idx;
unsigned long state;
@@ -89,7 +89,7 @@ struct efa_com_aenq {
struct efa_aenq_handlers *aenq_handlers;
dma_addr_t dma_addr;
u32 cc; /* consumer counter */
u16 msix_vector_idx;
u32 msix_vector_idx;
u16 depth;
u8 phase;
};


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
* Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -141,8 +141,7 @@ static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
return 0;
}
static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
int vector)
static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq, u32 vector)
{
u32 cpu;
@@ -305,7 +304,7 @@ static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
efa_free_irq(dev, &eq->irq);
}
static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u32 msix_vec)
{
int err;
@@ -328,21 +327,17 @@ err_free_comp_irq:
static int efa_create_eqs(struct efa_dev *dev)
{
unsigned int neqs = dev->dev_attr.max_eq;
int err;
int i;
neqs = min_t(unsigned int, neqs,
dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
u32 neqs = dev->dev_attr.max_eq;
int err, i;
neqs = min_t(u32, neqs, dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
return -ENOMEM;
for (i = 0; i < neqs; i++) {
err = efa_create_eq(dev, &dev->eqs[i],
i + EFA_COMP_EQS_VEC_BASE);
err = efa_create_eq(dev, &dev->eqs[i], i + EFA_COMP_EQS_VEC_BASE);
if (err)
goto err_destroy_eqs;
}
@@ -470,7 +465,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
efa_destroy_eqs(dev);
efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@@ -643,12 +637,14 @@ err_disable_device:
return ERR_PTR(err);
}
static void efa_remove_device(struct pci_dev *pdev)
static void efa_remove_device(struct pci_dev *pdev,
enum efa_regs_reset_reason_types reset_reason)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
struct efa_com_dev *edev;
edev = &dev->edev;
efa_com_dev_reset(edev, reset_reason);
efa_com_admin_destroy(edev);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
@@ -676,7 +672,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_remove_device:
efa_remove_device(pdev);
efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
return err;
}
@@ -685,7 +681,7 @@ static void efa_remove(struct pci_dev *pdev)
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_ib_device_remove(dev);
efa_remove_device(pdev);
efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
}
static void efa_shutdown(struct pci_dev *pdev)


@@ -5,7 +5,7 @@ config INFINIBAND_ERDMA
depends on INFINIBAND_ADDR_TRANS
depends on INFINIBAND_USER_ACCESS
help
This is a RDMA/iWarp driver for Alibaba Elastic RDMA Adapter(ERDMA),
This is a RDMA driver for Alibaba Elastic RDMA Adapter(ERDMA),
which supports RDMA features in Alibaba cloud environment.
To compile this driver as module, choose M here. The module will be


@@ -16,7 +16,7 @@
#include "erdma_hw.h"
#define DRV_MODULE_NAME "erdma"
#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
#define ERDMA_NODE_DESC "Elastic RDMA Adapter stack"
struct erdma_eq {
void *qbuf;
@@ -101,8 +101,6 @@ struct erdma_cmdq {
struct erdma_comp_wait *wait_pool;
spinlock_t lock;
bool use_event;
struct erdma_cmdq_sq sq;
struct erdma_cmdq_cq cq;
struct erdma_eq eq;
@@ -148,6 +146,8 @@ struct erdma_devattr {
u32 max_mr;
u32 max_pd;
u32 max_mw;
u32 max_gid;
u32 max_ah;
u32 local_dma_key;
};
@@ -177,7 +177,8 @@ struct erdma_resource_cb {
enum {
ERDMA_RES_TYPE_PD = 0,
ERDMA_RES_TYPE_STAG_IDX = 1,
ERDMA_RES_CNT = 2,
ERDMA_RES_TYPE_AH = 2,
ERDMA_RES_CNT = 3,
};
struct erdma_dev {
@@ -192,8 +193,6 @@ struct erdma_dev {
u8 __iomem *func_bar;
struct erdma_devattr attrs;
/* physical port state (only one port per device) */
enum ib_port_state state;
u32 mtu;
/* cmdq and aeq use the same msix vector */
@@ -215,6 +214,7 @@ struct erdma_dev {
struct dma_pool *db_pool;
struct dma_pool *resp_pool;
enum erdma_proto_type proto;
};
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
@@ -265,7 +265,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1);
u64 *resp0, u64 *resp1, bool sleepable);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
int erdma_ceqs_init(struct erdma_dev *dev);


@@ -567,7 +567,8 @@ reject_conn:
static int erdma_proc_mpareply(struct erdma_cep *cep)
{
struct erdma_qp_attrs qp_attrs;
enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
struct erdma_mod_qp_params_iwarp params;
struct erdma_qp *qp = cep->qp;
struct mpa_rr *rep;
int ret;
@@ -597,26 +598,29 @@ static int erdma_proc_mpareply(struct erdma_cep *cep)
return -EINVAL;
}
memset(&qp_attrs, 0, sizeof(qp_attrs));
qp_attrs.irq_size = cep->ird;
qp_attrs.orq_size = cep->ord;
qp_attrs.state = ERDMA_QP_STATE_RTS;
memset(&params, 0, sizeof(params));
params.state = ERDMA_QPS_IWARP_RTS;
params.irq_size = cep->ird;
params.orq_size = cep->ord;
down_write(&qp->state_lock);
if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto out_err;
}
qp->attrs.qp_type = ERDMA_QP_ACTIVE;
if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
qp->attrs.cc = COMPROMISE_CC;
to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_LLP_HANDLE |
ERDMA_QPA_IWARP_MPA | ERDMA_QPA_IWARP_IRD |
ERDMA_QPA_IWARP_ORD;
ret = erdma_modify_qp_internal(qp, &qp_attrs,
ERDMA_QP_ATTR_STATE |
ERDMA_QP_ATTR_LLP_HANDLE |
ERDMA_QP_ATTR_MPA);
params.qp_type = ERDMA_QP_ACTIVE;
if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc) {
to_modify_attrs |= ERDMA_QPA_IWARP_CC;
params.cc = COMPROMISE_CC;
}
ret = erdma_modify_qp_state_iwarp(qp, &params, to_modify_attrs);
up_write(&qp->state_lock);
@@ -722,7 +726,7 @@ static int erdma_newconn_connected(struct erdma_cep *cep)
__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
@@ -1126,10 +1130,11 @@ error_put_qp:
int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
struct erdma_dev *dev = to_edev(id->device);
struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
struct erdma_mod_qp_params_iwarp mod_qp_params;
enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
struct erdma_dev *dev = to_edev(id->device);
struct erdma_qp *qp;
struct erdma_qp_attrs qp_attrs;
int ret;
erdma_cep_set_inuse(cep);
@@ -1156,7 +1161,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
erdma_qp_get(qp);
down_write(&qp->state_lock);
if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto error;
@@ -1181,11 +1186,11 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->cm_id = id;
id->add_ref(id);
memset(&qp_attrs, 0, sizeof(qp_attrs));
qp_attrs.orq_size = params->ord;
qp_attrs.irq_size = params->ird;
memset(&mod_qp_params, 0, sizeof(mod_qp_params));
qp_attrs.state = ERDMA_QP_STATE_RTS;
mod_qp_params.irq_size = params->ird;
mod_qp_params.orq_size = params->ord;
mod_qp_params.state = ERDMA_QPS_IWARP_RTS;
/* Associate QP with CEP */
erdma_cep_get(cep);
@@ -1194,19 +1199,21 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->state = ERDMA_EPSTATE_RDMA_MODE;
qp->attrs.qp_type = ERDMA_QP_PASSIVE;
qp->attrs.pd_len = params->private_data_len;
mod_qp_params.qp_type = ERDMA_QP_PASSIVE;
mod_qp_params.pd_len = params->private_data_len;
if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
qp->attrs.cc = COMPROMISE_CC;
to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_ORD |
ERDMA_QPA_IWARP_LLP_HANDLE | ERDMA_QPA_IWARP_IRD |
ERDMA_QPA_IWARP_MPA;
if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits)) {
to_modify_attrs |= ERDMA_QPA_IWARP_CC;
mod_qp_params.cc = COMPROMISE_CC;
}
/* move to rts */
ret = erdma_modify_qp_internal(qp, &qp_attrs,
ERDMA_QP_ATTR_STATE |
ERDMA_QP_ATTR_ORD |
ERDMA_QP_ATTR_LLP_HANDLE |
ERDMA_QP_ATTR_IRD |
ERDMA_QP_ATTR_MPA);
ret = erdma_modify_qp_state_iwarp(qp, &mod_qp_params, to_modify_attrs);
up_write(&qp->state_lock);
if (ret)
@@ -1214,7 +1221,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->mpa.ext_data.bits = 0;
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
ret = erdma_send_mpareqrep(cep, params->private_data,
params->private_data_len);


@@ -182,7 +182,6 @@ int erdma_cmdq_init(struct erdma_dev *dev)
int err;
cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
cmdq->use_event = false;
sema_init(&cmdq->credits, cmdq->max_outstandings);
@@ -223,8 +222,6 @@ err_destroy_sq:
void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
/* after device init successfully, change cmdq to event mode. */
dev->cmdq.use_event = true;
arm_cmdq_cq(&dev->cmdq);
}
@@ -312,8 +309,7 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
/* Copy 16B comp data after cqe hdr to outer */
be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
if (cmdq->use_event)
complete(&comp_wait->wait_event);
complete(&comp_wait->wait_event);
return 0;
}
@@ -332,9 +328,6 @@ static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
if (erdma_poll_single_cmd_completion(cmdq))
break;
if (comp_num && cmdq->use_event)
arm_cmdq_cq(cmdq);
spin_unlock_irqrestore(&cmdq->cq.lock, flags);
}
@@ -342,8 +335,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
{
int got_event = 0;
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
!cmdq->use_event)
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return;
while (get_next_valid_eqe(&cmdq->eq)) {
@@ -354,6 +346,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
if (got_event) {
cmdq->cq.cmdsn++;
erdma_polling_cmd_completions(cmdq);
arm_cmdq_cq(cmdq);
}
notify_eq(&cmdq->eq);
@@ -372,7 +365,7 @@ static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
if (time_is_before_jiffies(comp_timeout))
return -ETIME;
msleep(20);
udelay(20);
}
return 0;
@@ -403,7 +396,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
}
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1)
u64 *resp0, u64 *resp1, bool sleepable)
{
struct erdma_comp_wait *comp_wait;
int ret;
@@ -411,7 +404,12 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return -ENODEV;
down(&cmdq->credits);
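/*
 * Non-sleepable callers (e.g. AH creation from atomic context) cannot block
 * on the credit semaphore, so they busy-wait with down_trylock() here and
 * later poll for the command completion instead of sleeping on it.
 */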
if (!sleepable) {
while (down_trylock(&cmdq->credits))
;
} else {
down(&cmdq->credits);
}
comp_wait = get_comp_wait(cmdq);
if (IS_ERR(comp_wait)) {
@@ -425,7 +423,7 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
push_cmdq_sqe(cmdq, req, req_size, comp_wait);
spin_unlock(&cmdq->sq.lock);
if (cmdq->use_event)
if (sleepable)
ret = erdma_wait_cmd_completion(comp_wait, cmdq,
ERDMA_CMDQ_TIMEOUT_MS);
else


@@ -105,6 +105,22 @@ static const struct {
{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};
static void erdma_process_ud_cqe(struct erdma_cqe *cqe, struct ib_wc *wc)
{
u32 ud_info;
wc->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
ud_info = be32_to_cpu(cqe->ud.info);
wc->network_hdr_type = FIELD_GET(ERDMA_CQE_NTYPE_MASK, ud_info);
if (wc->network_hdr_type == ERDMA_NETWORK_TYPE_IPV4)
wc->network_hdr_type = RDMA_NETWORK_IPV4;
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
wc->src_qp = FIELD_GET(ERDMA_CQE_SQPN_MASK, ud_info);
wc->sl = FIELD_GET(ERDMA_CQE_SL_MASK, ud_info);
wc->pkey_index = 0;
}
#define ERDMA_POLLCQ_NO_QP 1
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
@@ -168,6 +184,10 @@ static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
if (erdma_device_rocev2(dev) &&
(qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_GSI))
erdma_process_ud_cqe(cqe, wc);
if (syndrome >= ERDMA_NUM_WC_STATUS)
syndrome = ERDMA_WC_GENERAL_ERR;
@@ -201,3 +221,48 @@ int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return npolled;
}
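/* Remove all pending CQEs that belong to @qpn: valid CQEs are scanned, the
 * ones for other QPs are compacted towards the newer slots (adopting the
 * owner bit of their new slot), and the consumer index ends up advanced
 * only past the removed entries.
 */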
void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn)
{
struct erdma_cq *cq = to_ecq(ibcq);
struct erdma_cqe *cqe, *dst_cqe;
u32 prev_cq_ci, cur_cq_ci;
u32 ncqe = 0, nqp_cqe = 0;
unsigned long flags;
u8 owner;
spin_lock_irqsave(&cq->kern_cq.lock, flags);
prev_cq_ci = cq->kern_cq.ci;
while (ncqe < cq->depth && (cqe = get_next_valid_cqe(cq)) != NULL) {
++cq->kern_cq.ci;
++ncqe;
}
while (ncqe > 0) {
cur_cq_ci = prev_cq_ci + ncqe - 1;
cqe = get_queue_entry(cq->kern_cq.qbuf, cur_cq_ci, cq->depth,
CQE_SHIFT);
if (be32_to_cpu(cqe->qpn) == qpn) {
++nqp_cqe;
} else if (nqp_cqe) {
dst_cqe = get_queue_entry(cq->kern_cq.qbuf,
cur_cq_ci + nqp_cqe,
cq->depth, CQE_SHIFT);
owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
be32_to_cpu(dst_cqe->hdr));
cqe->hdr = cpu_to_be32(
(be32_to_cpu(cqe->hdr) &
~ERDMA_CQE_HDR_OWNER_MASK) |
FIELD_PREP(ERDMA_CQE_HDR_OWNER_MASK, owner));
memcpy(dst_cqe, cqe, sizeof(*cqe));
}
--ncqe;
}
cq->kern_cq.ci = prev_cq_ci + nqp_cqe;
spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
}


@@ -236,7 +236,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
false);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -278,7 +279,8 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
false);
if (err)
return;


@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/if_ether.h>
/* PCIe device related definition. */
#define ERDMA_PCI_WIDTH 64
@@ -21,8 +22,21 @@
#define ERDMA_NUM_MSIX_VEC 32U
#define ERDMA_MSIX_VECTOR_CMDQ 0
/* RoCEv2 related */
#define ERDMA_ROCEV2_GID_SIZE 16
#define ERDMA_MAX_PKEYS 1
#define ERDMA_DEFAULT_PKEY 0xFFFF
/* erdma device protocol type */
enum erdma_proto_type {
ERDMA_PROTO_IWARP = 0,
ERDMA_PROTO_ROCEV2 = 1,
ERDMA_PROTO_COUNT = 2,
};
/* PCIe Bar0 Registers. */
#define ERDMA_REGS_VERSION_REG 0x0
#define ERDMA_REGS_DEV_PROTO_REG 0xC
#define ERDMA_REGS_DEV_CTRL_REG 0x10
#define ERDMA_REGS_DEV_ST_REG 0x14
#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
@@ -136,7 +150,11 @@ enum CMDQ_RDMA_OPCODE {
CMDQ_OPCODE_DESTROY_CQ = 5,
CMDQ_OPCODE_REFLUSH = 6,
CMDQ_OPCODE_REG_MR = 8,
CMDQ_OPCODE_DEREG_MR = 9
CMDQ_OPCODE_DEREG_MR = 9,
CMDQ_OPCODE_SET_GID = 14,
CMDQ_OPCODE_CREATE_AH = 15,
CMDQ_OPCODE_DESTROY_AH = 16,
CMDQ_OPCODE_QUERY_QP = 17,
};
enum CMDQ_COMMON_OPCODE {
@@ -284,6 +302,36 @@ struct erdma_cmdq_dereg_mr_req {
u32 cfg;
};
/* create_av cfg0 */
#define ERDMA_CMD_CREATE_AV_FL_MASK GENMASK(19, 0)
#define ERDMA_CMD_CREATE_AV_NTYPE_MASK BIT(20)
struct erdma_av_cfg {
u32 cfg0;
u8 traffic_class;
u8 hop_limit;
u8 sl;
u8 rsvd;
u16 udp_sport;
u16 sgid_index;
u8 dmac[ETH_ALEN];
u8 padding[2];
u8 dgid[ERDMA_ROCEV2_GID_SIZE];
};
struct erdma_cmdq_create_ah_req {
u64 hdr;
u32 pdn;
u32 ahn;
struct erdma_av_cfg av_cfg;
};
struct erdma_cmdq_destroy_ah_req {
u64 hdr;
u32 pdn;
u32 ahn;
};
/* modify qp cfg */
#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
@@ -301,6 +349,36 @@ struct erdma_cmdq_modify_qp_req {
u32 recv_nxt;
};
/* modify qp cfg1 for roce device */
#define ERDMA_CMD_MODIFY_QP_DQPN_MASK GENMASK(19, 0)
struct erdma_cmdq_mod_qp_req_rocev2 {
u64 hdr;
u32 cfg0;
u32 cfg1;
u32 attr_mask;
u32 qkey;
u32 rq_psn;
u32 sq_psn;
struct erdma_av_cfg av_cfg;
};
/* query qp response mask */
#define ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK GENMASK_ULL(23, 0)
#define ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK GENMASK_ULL(47, 24)
#define ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK GENMASK_ULL(55, 48)
#define ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK GENMASK_ULL(56, 56)
struct erdma_cmdq_query_qp_req_rocev2 {
u64 hdr;
u32 qpn;
};
enum erdma_qp_type {
ERDMA_QPT_RC = 0,
ERDMA_QPT_UD = 1,
};
/* create qp cfg0 */
#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
@@ -309,6 +387,9 @@ struct erdma_cmdq_modify_qp_req {
#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
/* create qp cfg2 */
#define ERDMA_CMD_CREATE_QP_TYPE_MASK GENMASK(3, 0)
/* create qp cqn_mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
#define ERDMA_CMD_CREATE_QP_DB_CFG_MASK BIT(25)
@@ -342,6 +423,7 @@ struct erdma_cmdq_create_qp_req {
u64 rq_mtt_entry[3];
u32 db_cfg;
u32 cfg2;
};
struct erdma_cmdq_destroy_qp_req {
@@ -394,10 +476,33 @@ struct erdma_cmdq_query_stats_resp {
u64 rx_pps_meter_drop_packets_cnt;
};
enum erdma_network_type {
ERDMA_NETWORK_TYPE_IPV4 = 0,
ERDMA_NETWORK_TYPE_IPV6 = 1,
};
enum erdma_set_gid_op {
ERDMA_SET_GID_OP_ADD = 0,
ERDMA_SET_GID_OP_DEL = 1,
};
/* set gid cfg */
#define ERDMA_CMD_SET_GID_SGID_IDX_MASK GENMASK(15, 0)
#define ERDMA_CMD_SET_GID_NTYPE_MASK BIT(16)
#define ERDMA_CMD_SET_GID_OP_MASK BIT(31)
struct erdma_cmdq_set_gid_req {
u64 hdr;
u32 cfg;
u8 gid[ERDMA_ROCEV2_GID_SIZE];
};
/* cap qword 0 definition */
#define ERDMA_CMD_DEV_CAP_MAX_GID_MASK GENMASK_ULL(51, 48)
#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
#define ERDMA_CMD_DEV_CAP_MAX_AH_MASK GENMASK_ULL(15, 8)
#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
/* cap qword 1 definition */
@@ -426,6 +531,10 @@ enum {
#define ERDMA_CQE_QTYPE_RQ 1
#define ERDMA_CQE_QTYPE_CMDQ 2
#define ERDMA_CQE_NTYPE_MASK BIT(31)
#define ERDMA_CQE_SL_MASK GENMASK(27, 20)
#define ERDMA_CQE_SQPN_MASK GENMASK(19, 0)
struct erdma_cqe {
__be32 hdr;
__be32 qe_idx;
@@ -435,7 +544,16 @@ struct erdma_cqe {
__be32 inv_rkey;
};
__be32 size;
__be32 rsvd[3];
union {
struct {
__be32 rsvd[3];
} rc;
struct {
__be32 rsvd[2];
__be32 info;
} ud;
};
};
struct erdma_sge {
@@ -487,7 +605,7 @@ struct erdma_write_sqe {
struct erdma_sge sgl[];
};
struct erdma_send_sqe {
struct erdma_send_sqe_rc {
__le64 hdr;
union {
__be32 imm_data;
@@ -498,6 +616,17 @@ struct erdma_send_sqe {
struct erdma_sge sgl[];
};
struct erdma_send_sqe_ud {
__le64 hdr;
__be32 imm_data;
__le32 length;
__le32 qkey;
__le32 dst_qpn;
__le32 ahn;
__le32 rsvd;
struct erdma_sge sgl[];
};
struct erdma_readreq_sqe {
__le64 hdr;
__le32 invalid_stag;


@@ -26,14 +26,6 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
goto done;
switch (event) {
case NETDEV_UP:
dev->state = IB_PORT_ACTIVE;
erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_DOWN:
dev->state = IB_PORT_DOWN;
erdma_port_event(dev, IB_EVENT_PORT_ERR);
break;
case NETDEV_CHANGEMTU:
if (dev->mtu != netdev->mtu) {
erdma_set_mtu(dev, netdev->mtu);
@@ -172,6 +164,8 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
dev->proto = erdma_reg_read32(dev, ERDMA_REGS_DEV_PROTO_REG);
dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
0);
@@ -390,7 +384,7 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
CMDQ_OPCODE_QUERY_DEVICE);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
&cap1);
&cap1, true);
if (err)
return err;
@@ -398,6 +392,8 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
dev->attrs.max_gid = 1 << ERDMA_GET_CAP(MAX_GID, cap0);
dev->attrs.max_ah = 1 << ERDMA_GET_CAP(MAX_AH, cap0);
dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
@@ -415,12 +411,13 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
dev->res_cb[ERDMA_RES_TYPE_AH].max_cap = dev->attrs.max_ah;
erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_QUERY_FW_INFO);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
&cap1);
&cap1, true);
if (!err)
dev->attrs.fw_version =
FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
@@ -441,7 +438,8 @@ static int erdma_device_config(struct erdma_dev *dev)
req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
}
static int erdma_res_cb_init(struct erdma_dev *dev)
@@ -474,6 +472,29 @@ static void erdma_res_cb_free(struct erdma_dev *dev)
bitmap_free(dev->res_cb[i].bitmap);
}
static const struct ib_device_ops erdma_device_ops_rocev2 = {
.get_link_layer = erdma_get_link_layer,
.add_gid = erdma_add_gid,
.del_gid = erdma_del_gid,
.query_pkey = erdma_query_pkey,
.create_ah = erdma_create_ah,
.destroy_ah = erdma_destroy_ah,
.query_ah = erdma_query_ah,
INIT_RDMA_OBJ_SIZE(ib_ah, erdma_ah, ibah),
};
static const struct ib_device_ops erdma_device_ops_iwarp = {
.iw_accept = erdma_accept,
.iw_add_ref = erdma_qp_get_ref,
.iw_connect = erdma_connect,
.iw_create_listen = erdma_create_listen,
.iw_destroy_listen = erdma_destroy_listen,
.iw_get_qp = erdma_get_ibqp,
.iw_reject = erdma_reject,
.iw_rem_ref = erdma_qp_put_ref,
};
static const struct ib_device_ops erdma_device_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_ERDMA,
@@ -494,18 +515,9 @@ static const struct ib_device_ops erdma_device_ops = {
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
.iw_accept = erdma_accept,
.iw_add_ref = erdma_qp_get_ref,
.iw_connect = erdma_connect,
.iw_create_listen = erdma_create_listen,
.iw_destroy_listen = erdma_destroy_listen,
.iw_get_qp = erdma_get_ibqp,
.iw_reject = erdma_reject,
.iw_rem_ref = erdma_qp_put_ref,
.map_mr_sg = erdma_map_mr_sg,
.mmap = erdma_mmap,
.mmap_free = erdma_mmap_free,
.modify_qp = erdma_modify_qp,
.post_recv = erdma_post_recv,
.post_send = erdma_post_send,
.poll_cq = erdma_poll_cq,
@@ -515,6 +527,7 @@ static const struct ib_device_ops erdma_device_ops = {
.query_qp = erdma_query_qp,
.req_notify_cq = erdma_req_notify_cq,
.reg_user_mr = erdma_reg_user_mr,
.modify_qp = erdma_modify_qp,
INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
@@ -537,7 +550,14 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;
ibdev->node_type = RDMA_NODE_RNIC;
if (erdma_device_iwarp(dev)) {
ibdev->node_type = RDMA_NODE_RNIC;
ib_set_device_ops(ibdev, &erdma_device_ops_iwarp);
} else {
ibdev->node_type = RDMA_NODE_IB_CA;
ib_set_device_ops(ibdev, &erdma_device_ops_rocev2);
}
memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
/*


@@ -11,20 +11,20 @@
void erdma_qp_llp_close(struct erdma_qp *qp)
{
struct erdma_qp_attrs qp_attrs;
struct erdma_mod_qp_params_iwarp params;
down_write(&qp->state_lock);
switch (qp->attrs.state) {
case ERDMA_QP_STATE_RTS:
case ERDMA_QP_STATE_RTR:
case ERDMA_QP_STATE_IDLE:
case ERDMA_QP_STATE_TERMINATE:
qp_attrs.state = ERDMA_QP_STATE_CLOSING;
erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
switch (qp->attrs.iwarp.state) {
case ERDMA_QPS_IWARP_RTS:
case ERDMA_QPS_IWARP_RTR:
case ERDMA_QPS_IWARP_IDLE:
case ERDMA_QPS_IWARP_TERMINATE:
params.state = ERDMA_QPS_IWARP_CLOSING;
erdma_modify_qp_state_iwarp(qp, &params, ERDMA_QPA_IWARP_STATE);
break;
case ERDMA_QP_STATE_CLOSING:
qp->attrs.state = ERDMA_QP_STATE_IDLE;
case ERDMA_QPS_IWARP_CLOSING:
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
break;
default:
break;
@@ -48,9 +48,10 @@ struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
return NULL;
}
static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
struct erdma_qp_attrs *attrs,
enum erdma_qp_attr_mask mask)
static int
erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
struct erdma_mod_qp_params_iwarp *params,
enum erdma_qpa_mask_iwarp mask)
{
int ret;
struct erdma_dev *dev = qp->dev;
@@ -59,12 +60,15 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
struct erdma_cep *cep = qp->cep;
struct sockaddr_storage local_addr, remote_addr;
if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
if (!(mask & ERDMA_QPA_IWARP_LLP_HANDLE))
return -EINVAL;
if (!(mask & ERDMA_QP_ATTR_MPA))
if (!(mask & ERDMA_QPA_IWARP_MPA))
return -EINVAL;
if (!(mask & ERDMA_QPA_IWARP_CC))
params->cc = qp->attrs.cc;
ret = getname_local(cep->sock, &local_addr);
if (ret < 0)
return ret;
@@ -73,18 +77,16 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
if (ret < 0)
return ret;
qp->attrs.state = ERDMA_QP_STATE_RTS;
tp = tcp_sk(qp->cep->sock->sk);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, params->cc) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
req.cookie = be32_to_cpu(cep->mpa.ext_data.cookie);
req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
req.dport = to_sockaddr_in(remote_addr).sin_port;
@@ -92,33 +94,57 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt = tp->snd_nxt;
/* rsvd tcp seq for mpa-rsp in server. */
if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
if (params->qp_type == ERDMA_QP_PASSIVE)
req.send_nxt += MPA_DEFAULT_HDR_LEN + params->pd_len;
req.recv_nxt = tp->rcv_nxt;
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (ret)
return ret;
if (mask & ERDMA_QPA_IWARP_IRD)
qp->attrs.irq_size = params->irq_size;
if (mask & ERDMA_QPA_IWARP_ORD)
qp->attrs.orq_size = params->orq_size;
if (mask & ERDMA_QPA_IWARP_CC)
qp->attrs.cc = params->cc;
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_RTS;
return 0;
}
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
struct erdma_qp_attrs *attrs,
enum erdma_qp_attr_mask mask)
static int
erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
struct erdma_mod_qp_params_iwarp *params,
enum erdma_qpa_mask_iwarp mask)
{
struct erdma_dev *dev = qp->dev;
struct erdma_cmdq_modify_qp_req req;
qp->attrs.state = attrs->state;
int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (ret)
return ret;
qp->attrs.iwarp.state = params->state;
return 0;
}
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
enum erdma_qp_attr_mask mask)
int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
struct erdma_mod_qp_params_iwarp *params,
int mask)
{
bool need_reflush = false;
int drop_conn, ret = 0;
@@ -126,31 +152,31 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
if (!mask)
return 0;
if (!(mask & ERDMA_QP_ATTR_STATE))
if (!(mask & ERDMA_QPA_IWARP_STATE))
return 0;
switch (qp->attrs.state) {
case ERDMA_QP_STATE_IDLE:
case ERDMA_QP_STATE_RTR:
if (attrs->state == ERDMA_QP_STATE_RTS) {
ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
qp->attrs.state = ERDMA_QP_STATE_ERROR;
switch (qp->attrs.iwarp.state) {
case ERDMA_QPS_IWARP_IDLE:
case ERDMA_QPS_IWARP_RTR:
if (params->state == ERDMA_QPS_IWARP_RTS) {
ret = erdma_modify_qp_state_to_rts(qp, params, mask);
} else if (params->state == ERDMA_QPS_IWARP_ERROR) {
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
need_reflush = true;
if (qp->cep) {
erdma_cep_put(qp->cep);
qp->cep = NULL;
}
ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
ret = erdma_modify_qp_state_to_stop(qp, params, mask);
}
break;
case ERDMA_QP_STATE_RTS:
case ERDMA_QPS_IWARP_RTS:
drop_conn = 0;
if (attrs->state == ERDMA_QP_STATE_CLOSING ||
attrs->state == ERDMA_QP_STATE_TERMINATE ||
attrs->state == ERDMA_QP_STATE_ERROR) {
ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
if (params->state == ERDMA_QPS_IWARP_CLOSING ||
params->state == ERDMA_QPS_IWARP_TERMINATE ||
params->state == ERDMA_QPS_IWARP_ERROR) {
ret = erdma_modify_qp_state_to_stop(qp, params, mask);
drop_conn = 1;
need_reflush = true;
}
@@ -159,17 +185,17 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
erdma_qp_cm_drop(qp);
break;
case ERDMA_QP_STATE_TERMINATE:
if (attrs->state == ERDMA_QP_STATE_ERROR)
qp->attrs.state = ERDMA_QP_STATE_ERROR;
case ERDMA_QPS_IWARP_TERMINATE:
if (params->state == ERDMA_QPS_IWARP_ERROR)
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
break;
case ERDMA_QP_STATE_CLOSING:
if (attrs->state == ERDMA_QP_STATE_IDLE) {
qp->attrs.state = ERDMA_QP_STATE_IDLE;
} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
qp->attrs.state = ERDMA_QP_STATE_ERROR;
} else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
case ERDMA_QPS_IWARP_CLOSING:
if (params->state == ERDMA_QPS_IWARP_IDLE) {
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
} else if (params->state == ERDMA_QPS_IWARP_ERROR) {
ret = erdma_modify_qp_state_to_stop(qp, params, mask);
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
} else if (params->state != ERDMA_QPS_IWARP_CLOSING) {
return -ECONNABORTED;
}
break;
@@ -186,6 +212,98 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
return ret;
}
static int modify_qp_cmd_rocev2(struct erdma_qp *qp,
struct erdma_mod_qp_params_rocev2 *params,
enum erdma_qpa_mask_rocev2 attr_mask)
{
struct erdma_cmdq_mod_qp_req_rocev2 req;
memset(&req, 0, sizeof(req));
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
req.cfg0 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
req.cfg0 |= FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK,
params->state);
if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
req.cfg1 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_DQPN_MASK,
params->dst_qpn);
if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
req.qkey = params->qkey;
if (attr_mask & ERDMA_QPA_ROCEV2_AV)
erdma_set_av_cfg(&req.av_cfg, &params->av);
if (attr_mask & ERDMA_QPA_ROCEV2_SQ_PSN)
req.sq_psn = params->sq_psn;
if (attr_mask & ERDMA_QPA_ROCEV2_RQ_PSN)
req.rq_psn = params->rq_psn;
req.attr_mask = attr_mask;
return erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL,
NULL, true);
}
static void erdma_reset_qp(struct erdma_qp *qp)
{
qp->kern_qp.sq_pi = 0;
qp->kern_qp.sq_ci = 0;
qp->kern_qp.rq_pi = 0;
qp->kern_qp.rq_ci = 0;
memset(qp->kern_qp.swr_tbl, 0, qp->attrs.sq_size * sizeof(u64));
memset(qp->kern_qp.rwr_tbl, 0, qp->attrs.rq_size * sizeof(u64));
memset(qp->kern_qp.sq_buf, 0, qp->attrs.sq_size << SQEBB_SHIFT);
memset(qp->kern_qp.rq_buf, 0, qp->attrs.rq_size << RQE_SHIFT);
erdma_remove_cqes_of_qp(&qp->scq->ibcq, QP_ID(qp));
if (qp->rcq != qp->scq)
erdma_remove_cqes_of_qp(&qp->rcq->ibcq, QP_ID(qp));
}
int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
struct erdma_mod_qp_params_rocev2 *params,
int attr_mask)
{
struct erdma_dev *dev = to_edev(qp->ibqp.device);
int ret;
ret = modify_qp_cmd_rocev2(qp, params, attr_mask);
if (ret)
return ret;
if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
qp->attrs.rocev2.state = params->state;
if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
qp->attrs.rocev2.qkey = params->qkey;
if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
qp->attrs.rocev2.dst_qpn = params->dst_qpn;
if (attr_mask & ERDMA_QPA_ROCEV2_AV)
memcpy(&qp->attrs.rocev2.av, &params->av,
sizeof(struct erdma_av));
if (rdma_is_kernel_res(&qp->ibqp.res) &&
params->state == ERDMA_QPS_ROCEV2_RESET)
erdma_reset_qp(qp);
if (rdma_is_kernel_res(&qp->ibqp.res) &&
params->state == ERDMA_QPS_ROCEV2_ERROR) {
qp->flags |= ERDMA_QP_IN_FLUSHING;
mod_delayed_work(dev->reflush_wq, &qp->reflush_dwork,
usecs_to_jiffies(100));
}
return 0;
}
static void erdma_qp_safe_free(struct kref *ref)
{
struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
@@ -282,17 +400,57 @@ static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
return 0;
}
static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
const struct ib_send_wr *wr, u32 *hw_op)
{
u32 op = ERDMA_OP_SEND;
if (wr->opcode == IB_WR_SEND_WITH_IMM) {
op = ERDMA_OP_SEND_WITH_IMM;
sqe->imm_data = wr->ex.imm_data;
} else if (wr->opcode == IB_WR_SEND_WITH_INV) {
op = ERDMA_OP_SEND_WITH_INV;
sqe->invalid_stag = cpu_to_le32(wr->ex.invalidate_rkey);
}
*hw_op = op;
}
static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
const struct ib_send_wr *wr, u32 *hw_op)
{
const struct ib_ud_wr *uwr = ud_wr(wr);
struct erdma_ah *ah = to_eah(uwr->ah);
u32 op = ERDMA_OP_SEND;
if (wr->opcode == IB_WR_SEND_WITH_IMM) {
op = ERDMA_OP_SEND_WITH_IMM;
sqe->imm_data = wr->ex.imm_data;
}
*hw_op = op;
sqe->ahn = cpu_to_le32(ah->ahn);
sqe->dst_qpn = cpu_to_le32(uwr->remote_qpn);
/* Not allowed to send control qkey */
if (uwr->remote_qkey & 0x80000000)
sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
else
sqe->qkey = cpu_to_le32(uwr->remote_qkey);
}
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
const struct ib_send_wr *send_wr)
{
u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
u32 idx = *pi & (qp->attrs.sq_size - 1);
enum ib_wr_opcode op = send_wr->opcode;
struct erdma_send_sqe_rc *rc_send_sqe;
struct erdma_send_sqe_ud *ud_send_sqe;
struct erdma_atomic_sqe *atomic_sqe;
struct erdma_readreq_sqe *read_sqe;
struct erdma_reg_mr_sqe *regmr_sge;
struct erdma_write_sqe *write_sqe;
struct erdma_send_sqe *send_sqe;
struct ib_rdma_wr *rdma_wr;
struct erdma_sge *sge;
__le32 *length_field;
@@ -301,6 +459,10 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
u32 attrs;
int ret;
if (qp->ibqp.qp_type != IB_QPT_RC && send_wr->opcode != IB_WR_SEND &&
send_wr->opcode != IB_WR_SEND_WITH_IMM)
return -EINVAL;
entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
SQEBB_SHIFT);
@@ -374,21 +536,20 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND_WITH_INV:
send_sqe = (struct erdma_send_sqe *)entry;
hw_op = ERDMA_OP_SEND;
if (op == IB_WR_SEND_WITH_IMM) {
hw_op = ERDMA_OP_SEND_WITH_IMM;
send_sqe->imm_data = send_wr->ex.imm_data;
} else if (op == IB_WR_SEND_WITH_INV) {
hw_op = ERDMA_OP_SEND_WITH_INV;
send_sqe->invalid_stag =
cpu_to_le32(send_wr->ex.invalidate_rkey);
if (qp->ibqp.qp_type == IB_QPT_RC) {
rc_send_sqe = (struct erdma_send_sqe_rc *)entry;
init_send_sqe_rc(qp, rc_send_sqe, send_wr, &hw_op);
length_field = &rc_send_sqe->length;
wqe_size = sizeof(struct erdma_send_sqe_rc);
} else {
ud_send_sqe = (struct erdma_send_sqe_ud *)entry;
init_send_sqe_ud(qp, ud_send_sqe, send_wr, &hw_op);
length_field = &ud_send_sqe->length;
wqe_size = sizeof(struct erdma_send_sqe_ud);
}
wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
length_field = &send_sqe->length;
wqe_size = sizeof(struct erdma_send_sqe);
sgl_offset = wqe_size;
sgl_offset = wqe_size;
wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
break;
case IB_WR_REG_MR:
wqe_hdr |=


@@ -55,6 +55,13 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
ilog2(qp->attrs.rq_size)) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
if (qp->ibqp.qp_type == IB_QPT_RC)
req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
ERDMA_QPT_RC);
else
req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
ERDMA_QPT_UD);
if (rdma_is_kernel_res(&qp->ibqp.res)) {
u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
@@ -119,10 +126,10 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
}
}
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
&resp1);
if (!err)
qp->attrs.cookie =
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
true);
if (!err && erdma_device_iwarp(dev))
qp->attrs.iwarp.cookie =
FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
return err;
@@ -178,7 +185,8 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
}
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
@@ -240,7 +248,8 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
}
}
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -336,6 +345,11 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
if (erdma_device_rocev2(dev)) {
attr->max_pkeys = ERDMA_MAX_PKEYS;
attr->max_ah = dev->attrs.max_ah;
}
if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
attr->atomic_cap = IB_ATOMIC_GLOB;
@@ -367,7 +381,14 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
memset(attr, 0, sizeof(*attr));
attr->gid_tbl_len = 1;
if (erdma_device_iwarp(dev)) {
attr->gid_tbl_len = 1;
} else {
attr->gid_tbl_len = dev->attrs.max_gid;
attr->ip_gids = true;
attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
}
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->max_msg_sz = -1;
@@ -377,14 +398,10 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
if (netif_running(ndev) && netif_carrier_ok(ndev))
dev->state = IB_PORT_ACTIVE;
else
dev->state = IB_PORT_DOWN;
attr->state = dev->state;
attr->state = ib_get_curr_port_state(ndev);
out:
if (dev->state == IB_PORT_ACTIVE)
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
@@ -395,8 +412,18 @@ out:
int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
struct ib_port_immutable *port_immutable)
{
port_immutable->gid_tbl_len = 1;
port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
struct erdma_dev *dev = to_edev(ibdev);
if (erdma_device_iwarp(dev)) {
port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
port_immutable->gid_tbl_len = 1;
} else {
port_immutable->core_cap_flags =
RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
port_immutable->gid_tbl_len = dev->attrs.max_gid;
port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
}
return 0;
}
@@ -438,7 +465,8 @@ static void erdma_flush_worker(struct work_struct *work)
req.qpn = QP_ID(qp);
req.sq_pi = qp->kern_qp.sq_pi;
req.rq_pi = qp->kern_qp.rq_pi;
erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL);
erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
}
static int erdma_qp_validate_cap(struct erdma_dev *dev,
@@ -459,7 +487,11 @@ static int erdma_qp_validate_cap(struct erdma_dev *dev,
static int erdma_qp_validate_attr(struct erdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
if (attrs->qp_type != IB_QPT_RC)
if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
return -EOPNOTSUPP;
if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
return -EOPNOTSUPP;
if (attrs->srq)
@@ -937,7 +969,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
udata, struct erdma_ucontext, ibucontext);
struct erdma_ureq_create_qp ureq;
struct erdma_uresp_create_qp uresp;
int ret;
void *old_entry;
int ret = 0;
ret = erdma_qp_validate_cap(dev, attrs);
if (ret)
@@ -956,9 +989,16 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
kref_init(&qp->ref);
init_completion(&qp->safe_free);
ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
XA_LIMIT(1, dev->attrs.max_qp - 1),
&dev->next_alloc_qpn, GFP_KERNEL);
if (qp->ibqp.qp_type == IB_QPT_GSI) {
old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
if (xa_is_err(old_entry))
ret = xa_err(old_entry);
} else {
ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
XA_LIMIT(1, dev->attrs.max_qp - 1),
&dev->next_alloc_qpn, GFP_KERNEL);
}
if (ret < 0) {
ret = -ENOMEM;
goto err_out;
@@ -995,7 +1035,12 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->attrs.max_send_sge = attrs->cap.max_send_sge;
qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
qp->attrs.state = ERDMA_QP_STATE_IDLE;
if (erdma_device_iwarp(qp->dev))
qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
else
qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;
INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
ret = create_qp_cmd(uctx, qp);
@@ -1219,7 +1264,8 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (ret)
return ret;
@@ -1244,7 +1290,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (err)
return err;
@@ -1269,13 +1316,20 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct erdma_dev *dev = to_edev(ibqp->device);
struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
udata, struct erdma_ucontext, ibucontext);
struct erdma_qp_attrs qp_attrs;
int err;
struct erdma_cmdq_destroy_qp_req req;
union erdma_mod_qp_params params;
int err;
down_write(&qp->state_lock);
qp_attrs.state = ERDMA_QP_STATE_ERROR;
erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
if (erdma_device_iwarp(dev)) {
params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
erdma_modify_qp_state_iwarp(qp, &params.iwarp,
ERDMA_QPA_IWARP_STATE);
} else {
params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
erdma_modify_qp_state_rocev2(qp, &params.rocev2,
ERDMA_QPA_ROCEV2_STATE);
}
up_write(&qp->state_lock);
cancel_delayed_work_sync(&qp->reflush_dwork);
@@ -1284,7 +1338,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (err)
return err;
@@ -1382,7 +1437,8 @@ static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
true);
if (ret)
return ret;
@@ -1417,7 +1473,8 @@ static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
req.rdb_off = ctx->ext_db.rdb_off;
req.cdb_off = ctx->ext_db.cdb_off;
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (ret)
ibdev_err_ratelimited(&dev->ibdev,
"free db resources failed %d", ret);
@@ -1506,69 +1563,248 @@ void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
atomic_dec(&dev->num_ctx);
}
static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
[IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
[IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
[IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
[IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
[IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
[IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
struct erdma_av *av, u16 sport)
{
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
av->port = rdma_ah_get_port_num(ah_attr);
av->sgid_index = grh->sgid_index;
av->hop_limit = grh->hop_limit;
av->traffic_class = grh->traffic_class;
av->sl = rdma_ah_get_sl(ah_attr);
av->flow_label = grh->flow_label;
av->udp_sport = sport;
ether_addr_copy(av->dmac, ah_attr->roce.dmac);
memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
av->ntype = ERDMA_NETWORK_TYPE_IPV4;
else
av->ntype = ERDMA_NETWORK_TYPE_IPV6;
}
static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
{
ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_sl(ah_attr, av->sl);
rdma_ah_set_port_num(ah_attr, av->port);
rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
av->hop_limit, av->traffic_class);
rdma_ah_set_dgid_raw(ah_attr, av->dgid);
}
static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
[ERDMA_PROTO_IWARP] = {
[IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
[IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
[IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
[IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
[IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
[IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
[IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
},
[ERDMA_PROTO_ROCEV2] = {
[IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
[IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
[IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
[IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
[IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
[IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
[IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
},
};
static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
[ERDMA_PROTO_IWARP] = {
[ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
[ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
[ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
[ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
[ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
[ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
},
[ERDMA_PROTO_ROCEV2] = {
[ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
[ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
[ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
[ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
[ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
[ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
[ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
},
};
static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
{
return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
}
static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
{
return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
}
static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
{
return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
}
static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
{
return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
}
static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
int attr_mask)
{
enum ib_qp_state cur_state, nxt_state;
struct erdma_dev *dev = qp->dev;
int ret = -EINVAL;
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
ret = -EOPNOTSUPP;
goto out;
}
if ((attr_mask & IB_QP_PORT) &&
!rdma_is_port_valid(&dev->ibdev, attr->port_num))
goto out;
if (erdma_device_rocev2(dev)) {
cur_state = (attr_mask & IB_QP_CUR_STATE) ?
attr->cur_qp_state :
rocev2_to_ib_qps(qp->attrs.rocev2.state);
nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
cur_state;
if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
attr_mask))
goto out;
if ((attr_mask & IB_QP_AV) &&
erdma_check_gid_attr(
rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
goto out;
if ((attr_mask & IB_QP_PKEY_INDEX) &&
attr->pkey_index >= ERDMA_MAX_PKEYS)
goto out;
}
return 0;
out:
return ret;
}
static void erdma_init_mod_qp_params_rocev2(
struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
{
enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
enum erdma_qps_rocev2 cur_state, nxt_state;
u16 udp_sport;
if (ib_attr_mask & IB_QP_CUR_STATE)
cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
else
cur_state = qp->attrs.rocev2.state;
if (ib_attr_mask & IB_QP_STATE)
nxt_state = ib_to_rocev2_qps(attr->qp_state);
else
nxt_state = cur_state;
to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
params->state = nxt_state;
if (ib_attr_mask & IB_QP_QKEY) {
to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
params->qkey = attr->qkey;
}
if (ib_attr_mask & IB_QP_SQ_PSN) {
to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
params->sq_psn = attr->sq_psn;
}
if (ib_attr_mask & IB_QP_RQ_PSN) {
to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
params->rq_psn = attr->rq_psn;
}
if (ib_attr_mask & IB_QP_DEST_QPN) {
to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
params->dst_qpn = attr->dest_qp_num;
}
if (ib_attr_mask & IB_QP_AV) {
to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
QP_ID(qp), params->dst_qpn);
erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
}
*erdma_attr_mask = to_modify_attrs;
}
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
struct erdma_qp_attrs new_attrs;
enum erdma_qp_attr_mask erdma_attr_mask = 0;
struct erdma_qp *qp = to_eqp(ibqp);
int ret = 0;
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
memset(&new_attrs, 0, sizeof(new_attrs));
if (attr_mask & IB_QP_STATE) {
new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
}
union erdma_mod_qp_params params;
int ret = 0, erdma_attr_mask = 0;
down_write(&qp->state_lock);
ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
ret = erdma_check_qp_attrs(qp, attr, attr_mask);
if (ret)
goto out;
if (erdma_device_iwarp(qp->dev)) {
if (attr_mask & IB_QP_STATE) {
erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
}
ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
erdma_attr_mask);
} else {
erdma_init_mod_qp_params_rocev2(
qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);
ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
erdma_attr_mask);
}
out:
up_write(&qp->state_lock);
return ret;
}
static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
{
switch (qp->attrs.state) {
case ERDMA_QP_STATE_IDLE:
return IB_QPS_INIT;
case ERDMA_QP_STATE_RTR:
return IB_QPS_RTR;
case ERDMA_QP_STATE_RTS:
return IB_QPS_RTS;
case ERDMA_QP_STATE_CLOSING:
return IB_QPS_ERR;
case ERDMA_QP_STATE_TERMINATE:
return IB_QPS_ERR;
case ERDMA_QP_STATE_ERROR:
return IB_QPS_ERR;
default:
return IB_QPS_ERR;
}
if (erdma_device_iwarp(qp->dev))
return iwarp_to_ib_qps(qp->attrs.iwarp.state);
else
return rocev2_to_ib_qps(qp->attrs.rocev2.state);
}
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
struct erdma_cmdq_query_qp_req_rocev2 req;
struct erdma_dev *dev;
struct erdma_qp *qp;
u64 resp0, resp1;
int ret;
if (ibqp && qp_attr && qp_init_attr) {
qp = to_eqp(ibqp);
@@ -1595,8 +1831,37 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
qp_attr->qp_state = query_qp_state(qp);
qp_attr->cur_qp_state = query_qp_state(qp);
if (erdma_device_rocev2(dev)) {
/* Query hardware to get some attributes */
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_QUERY_QP);
req.qpn = QP_ID(qp);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
&resp1, true);
if (ret)
return ret;
qp_attr->sq_psn =
FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
qp_attr->rq_psn =
FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->sq_draining = FIELD_GET(
ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);
qp_attr->pkey_index = 0;
qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;
if (qp->ibqp.qp_type == IB_QPT_RC)
erdma_av_to_attr(&qp->attrs.rocev2.av,
&qp_attr->ah_attr);
} else {
qp_attr->qp_state = query_qp_state(qp);
qp_attr->cur_qp_state = qp_attr->qp_state;
}
return 0;
}
@@ -1736,7 +2001,7 @@ void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
CMDQ_OPCODE_CONF_MTU);
req.mtu = mtu;
erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
}
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
@@ -1806,7 +2071,8 @@ static int erdma_query_hw_stats(struct erdma_dev *dev,
req.target_addr = dma_addr;
req.target_length = ERDMA_HW_RESP_SIZE;
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
if (err)
goto out;
@@ -1839,3 +2105,159 @@ int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
const union ib_gid *gid)
{
struct erdma_cmdq_set_gid_req req;
u8 ntype;
req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);
if (op == ERDMA_SET_GID_OP_ADD) {
if (ipv6_addr_v4mapped((struct in6_addr *)gid))
ntype = ERDMA_NETWORK_TYPE_IPV4;
else
ntype = ERDMA_NETWORK_TYPE_IPV6;
req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);
memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
}
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_SET_GID);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
true);
}
int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct erdma_dev *dev = to_edev(attr->device);
int ret;
ret = erdma_check_gid_attr(attr);
if (ret)
return ret;
return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
&attr->gid);
}
int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
attr->index, NULL);
}
int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index >= ERDMA_MAX_PKEYS)
return -EINVAL;
*pkey = ERDMA_DEFAULT_PKEY;
return 0;
}
void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
{
av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);
av_cfg->traffic_class = av->traffic_class;
av_cfg->hop_limit = av->hop_limit;
av_cfg->sl = av->sl;
av_cfg->udp_sport = av->udp_sport;
av_cfg->sgid_index = av->sgid_index;
ether_addr_copy(av_cfg->dmac, av->dmac);
memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
}
int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
const struct ib_global_route *grh =
rdma_ah_read_grh(init_attr->ah_attr);
struct erdma_dev *dev = to_edev(ibah->device);
struct erdma_pd *pd = to_epd(ibah->pd);
struct erdma_ah *ah = to_eah(ibah);
struct erdma_cmdq_create_ah_req req;
u32 udp_sport;
int ret;
ret = erdma_check_gid_attr(grh->sgid_attr);
if (ret)
return ret;
ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
if (ret < 0)
return ret;
ah->ahn = ret;
if (grh->flow_label)
udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
else
udp_sport =
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_CREATE_AH);
req.pdn = pd->pdn;
req.ahn = ah->ahn;
erdma_set_av_cfg(&req.av_cfg, &ah->av);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (ret) {
erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
return ret;
}
return 0;
}
int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct erdma_dev *dev = to_edev(ibah->device);
struct erdma_pd *pd = to_epd(ibah->pd);
struct erdma_ah *ah = to_eah(ibah);
struct erdma_cmdq_destroy_ah_req req;
int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_DESTROY_AH);
req.pdn = pd->pdn;
req.ahn = ah->ahn;
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
flags & RDMA_DESTROY_AH_SLEEPABLE);
if (ret)
return ret;
erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
return 0;
}
int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
struct erdma_ah *ah = to_eah(ibah);
memset(ah_attr, 0, sizeof(*ah_attr));
erdma_av_to_attr(&ah->av, ah_attr);
return 0;
}


@@ -136,6 +136,25 @@ struct erdma_user_dbrecords_page {
int refcnt;
};
struct erdma_av {
u8 port;
u8 hop_limit;
u8 traffic_class;
u8 sl;
u8 sgid_index;
u16 udp_sport;
u32 flow_label;
u8 dmac[ETH_ALEN];
u8 dgid[ERDMA_ROCEV2_GID_SIZE];
enum erdma_network_type ntype;
};
struct erdma_ah {
struct ib_ah ibah;
struct erdma_av av;
u32 ahn;
};
struct erdma_uqp {
struct erdma_mem sq_mem;
struct erdma_mem rq_mem;
@@ -176,33 +195,91 @@ struct erdma_kqp {
u8 sig_all;
};
enum erdma_qp_state {
ERDMA_QP_STATE_IDLE = 0,
ERDMA_QP_STATE_RTR = 1,
ERDMA_QP_STATE_RTS = 2,
ERDMA_QP_STATE_CLOSING = 3,
ERDMA_QP_STATE_TERMINATE = 4,
ERDMA_QP_STATE_ERROR = 5,
ERDMA_QP_STATE_UNDEF = 7,
ERDMA_QP_STATE_COUNT = 8
enum erdma_qps_iwarp {
ERDMA_QPS_IWARP_IDLE = 0,
ERDMA_QPS_IWARP_RTR = 1,
ERDMA_QPS_IWARP_RTS = 2,
ERDMA_QPS_IWARP_CLOSING = 3,
ERDMA_QPS_IWARP_TERMINATE = 4,
ERDMA_QPS_IWARP_ERROR = 5,
ERDMA_QPS_IWARP_UNDEF = 6,
ERDMA_QPS_IWARP_COUNT = 7,
};
enum erdma_qp_attr_mask {
ERDMA_QP_ATTR_STATE = (1 << 0),
ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
ERDMA_QP_ATTR_ORD = (1 << 3),
ERDMA_QP_ATTR_IRD = (1 << 4),
ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
ERDMA_QP_ATTR_MPA = (1 << 7)
enum erdma_qpa_mask_iwarp {
ERDMA_QPA_IWARP_STATE = (1 << 0),
ERDMA_QPA_IWARP_LLP_HANDLE = (1 << 2),
ERDMA_QPA_IWARP_ORD = (1 << 3),
ERDMA_QPA_IWARP_IRD = (1 << 4),
ERDMA_QPA_IWARP_SQ_SIZE = (1 << 5),
ERDMA_QPA_IWARP_RQ_SIZE = (1 << 6),
ERDMA_QPA_IWARP_MPA = (1 << 7),
ERDMA_QPA_IWARP_CC = (1 << 8),
};
enum erdma_qps_rocev2 {
ERDMA_QPS_ROCEV2_RESET = 0,
ERDMA_QPS_ROCEV2_INIT = 1,
ERDMA_QPS_ROCEV2_RTR = 2,
ERDMA_QPS_ROCEV2_RTS = 3,
ERDMA_QPS_ROCEV2_SQD = 4,
ERDMA_QPS_ROCEV2_SQE = 5,
ERDMA_QPS_ROCEV2_ERROR = 6,
ERDMA_QPS_ROCEV2_COUNT = 7,
};
enum erdma_qpa_mask_rocev2 {
ERDMA_QPA_ROCEV2_STATE = (1 << 0),
ERDMA_QPA_ROCEV2_QKEY = (1 << 1),
ERDMA_QPA_ROCEV2_AV = (1 << 2),
ERDMA_QPA_ROCEV2_SQ_PSN = (1 << 3),
ERDMA_QPA_ROCEV2_RQ_PSN = (1 << 4),
ERDMA_QPA_ROCEV2_DST_QPN = (1 << 5),
};
enum erdma_qp_flags {
ERDMA_QP_IN_FLUSHING = (1 << 0),
};
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
struct erdma_mod_qp_params_iwarp {
enum erdma_qps_iwarp state;
enum erdma_cc_alg cc;
u8 qp_type;
u8 pd_len;
u32 irq_size;
u32 orq_size;
};
struct erdma_qp_attrs_iwarp {
enum erdma_qps_iwarp state;
u32 cookie;
};
struct erdma_mod_qp_params_rocev2 {
enum erdma_qps_rocev2 state;
u32 qkey;
u32 sq_psn;
u32 rq_psn;
u32 dst_qpn;
struct erdma_av av;
};
union erdma_mod_qp_params {
struct erdma_mod_qp_params_iwarp iwarp;
struct erdma_mod_qp_params_rocev2 rocev2;
};
struct erdma_qp_attrs_rocev2 {
enum erdma_qps_rocev2 state;
u32 qkey;
u32 dst_qpn;
struct erdma_av av;
};
struct erdma_qp_attrs {
enum erdma_qp_state state;
enum erdma_cc_alg cc; /* Congestion control algorithm */
u32 sq_size;
u32 rq_size;
@@ -210,11 +287,10 @@ struct erdma_qp_attrs {
u32 irq_size;
u32 max_send_sge;
u32 max_recv_sge;
u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
u8 qp_type;
u8 pd_len;
union {
struct erdma_qp_attrs_iwarp iwarp;
struct erdma_qp_attrs_rocev2 rocev2;
};
};
struct erdma_qp {
@@ -286,11 +362,25 @@ static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
enum erdma_qp_attr_mask mask);
int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
struct erdma_mod_qp_params_iwarp *params,
int mask);
int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
struct erdma_mod_qp_params_rocev2 *params,
int attr_mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);
static inline bool erdma_device_iwarp(struct erdma_dev *dev)
{
return dev->proto == ERDMA_PROTO_IWARP;
}
static inline bool erdma_device_rocev2(struct erdma_dev *dev)
{
return dev->proto == ERDMA_PROTO_ROCEV2;
}
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
return container_of(ibctx, struct erdma_ucontext, ibucontext);
@@ -316,6 +406,21 @@ static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
return container_of(ibcq, struct erdma_cq, ibcq);
}
static inline struct erdma_ah *to_eah(struct ib_ah *ibah)
{
return container_of(ibah, struct erdma_ah, ibah);
}
static inline int erdma_check_gid_attr(const struct ib_gid_attr *attr)
{
u8 ntype = rdma_gid_attr_network_type(attr);
if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6)
return -EINVAL;
return 0;
}
static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
@@ -360,6 +465,7 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
@@ -370,5 +476,15 @@ struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port, int index);
enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev,
u32 port_num);
int erdma_add_gid(const struct ib_gid_attr *attr, void **context);
int erdma_del_gid(const struct ib_gid_attr *attr, void **context);
int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av);
int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int erdma_destroy_ah(struct ib_ah *ibah, u32 flags);
int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
#endif


@@ -2339,20 +2339,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
*/
struct hfi1_hwerror_msgs {
u64 mask;
const char *msg;
size_t sz;
};
/* in intr.c... */
void hfi1_format_hwerrors(u64 hwerrs,
const struct hfi1_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t lmsg);
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0


@@ -47,37 +47,6 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
hfi1_event_pkey_change(ppd->dd, ppd->port);
}
/**
* format_hwmsg - format a single hwerror message
* @msg: message buffer
* @msgl: length of message buffer
* @hwmsg: message to add to message buffer
*/
static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
strlcat(msg, "[", msgl);
strlcat(msg, hwmsg, msgl);
strlcat(msg, "]", msgl);
}
/**
* hfi1_format_hwerrors - format hardware error messages for display
* @hwerrs: hardware errors bit vector
* @hwerrmsgs: hardware error descriptions
* @nhwerrmsgs: number of hwerrmsgs
* @msg: message buffer
* @msgl: message buffer length
*/
void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t msgl)
{
int i;
for (i = 0; i < nhwerrmsgs; i++)
if (hwerrs & hwerrmsgs[i].mask)
format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
}
static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
{
struct ib_event event;


@@ -27,8 +27,8 @@ static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj)
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
int ret;
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -57,7 +57,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -65,7 +65,7 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -93,9 +93,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
static struct bin_attribute *port_cc_bin_attributes[] = {
static const struct bin_attribute *const port_cc_bin_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL
@@ -134,7 +134,7 @@ static struct attribute *port_cc_attributes[] = {
static const struct attribute_group port_cc_group = {
.name = "CCMgtA",
.attrs = port_cc_attributes,
.bin_attrs = port_cc_bin_attributes,
.bin_attrs_new = port_cc_bin_attributes,
};
/* Start sc2vl */


@@ -1,21 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on (HNS_DSAF && HNS_ENET) || HNS3
help
This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
To compile HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP08
bool "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
depends on INFINIBAND_HNS=m || HNS3=y
tristate "Hisilicon Hip08 Family RoCE support"
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on PCI && HNS3
help
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
module will be called hns-roce-hw-v2.
To compile this driver, choose M here. This module will be called
hns-roce-hw-v2.


@@ -5,12 +5,9 @@
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
hns_roce_debugfs.o
hns_roce_debugfs.o hns_roce_hw_v2.o
ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
endif
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o


@@ -7185,9 +7185,22 @@ static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
return ret;
}
static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
bool linkup)
{
struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
struct net_device *netdev = handle->rinfo.netdev;
if (linkup || !hr_dev)
return;
ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev);
}
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
.link_status_change = hns_roce_hw_v2_link_status_change,
.reset_notify = hns_roce_hw_v2_reset_notify,
};


@@ -59,10 +59,6 @@ int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);


@@ -85,10 +85,6 @@ int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
int irdma_process_bh(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,


@@ -320,9 +320,6 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
case NETDEV_DOWN:
iwdev->iw_status = 0;
fallthrough;
case NETDEV_UP:
irdma_port_ibevent(iwdev);
break;
default:
break;
}
@@ -971,74 +968,6 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
irdma_qp_rem_ref(&iwqp->ibqp);
}
/**
* irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
* @dev: function device struct
* @val_mem: buffer for fpm
* @hmc_fn_id: function id for fpm
*/
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_request->param = NULL;
cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
cqp_info->post_sq = 1;
cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
return status;
}
/**
* irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
* @dev: hardware control device structure
* @val_mem: buffer with fpm values
* @hmc_fn_id: function id for fpm
*/
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_pci_f *rf = dev_to_rf(dev);
int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
return -ENOMEM;
cqp_info = &cqp_request->info;
cqp_request->param = NULL;
cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
cqp_info->post_sq = 1;
cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
return status;
}
/**
* irdma_cqp_cq_create_cmd - create a cq for the cqp
* @dev: device pointer

View File

@@ -150,8 +150,12 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
return PTR_ERR(*umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
if (shift < 0) {
err = shift;
goto err_buf;
}
err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
if (err)
goto err_buf;

View File

@@ -351,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
struct gid_entry *gids;
struct gid_entry *gids = NULL;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -389,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
}
spin_unlock_bh(&iboe->lock);
if (!ret && hw_update) {
if (gids)
ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
kfree(gids);
}
kfree(gids);
return ret;
}
@@ -2341,39 +2341,40 @@ static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
if (event == NETDEV_UP || event == NETDEV_DOWN) {
enum ib_port_state port_state;
struct ib_event ibev = { };
if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1,
&port_state))
goto iboe_out;
if (event == NETDEV_UP &&
(port_state != IB_PORT_ACTIVE ||
iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN))
goto iboe_out;
if (event == NETDEV_DOWN &&
(port_state != IB_PORT_DOWN ||
iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE))
goto iboe_out;
iboe->last_port_state[dev->dev_port] = port_state;
ibev.device = &ibdev->ib_dev;
ibev.element.port_num = dev->dev_port + 1;
ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
IB_EVENT_PORT_ERR;
ib_dispatch_event(&ibev);
}
iboe_out:
spin_unlock_bh(&iboe->lock);
if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
event == NETDEV_UP || event == NETDEV_CHANGE)
if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
}
static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev,
unsigned long event)
{
struct mlx4_ib_dev *mlx4_ibdev =
container_of(ibdev, struct mlx4_ib_dev, ib_dev);
struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe;
if (!net_eq(dev_net(ndev), &init_net))
return;
ASSERT_RTNL();
if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent)
return;
spin_lock_bh(&iboe->lock);
iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
if (event == NETDEV_UP || event == NETDEV_DOWN)
ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
spin_unlock_bh(&iboe->lock);
if (event == NETDEV_UP || event == NETDEV_CHANGE)
mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -2569,6 +2570,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq,
.report_port_event = mlx4_ib_port_event,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),

View File

@@ -667,6 +667,9 @@ struct mlx4_uverbs_ex_query_device {
__u32 reserved;
};
/* 4k - 4G */
#define MLX4_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12))
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -936,8 +939,19 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
return 0;
}
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
int *num_of_mtts);
static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
u64 start,
int *num_of_mtts)
{
unsigned long pg_sz;
pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start);
if (!pg_sz)
return -EOPNOTSUPP;
*num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz);
return order_base_2(pg_sz);
}
int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);
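
Since callers now get back either a page shift or a negative errno, a short worked example of the new inline helps; the numbers are illustrative, not taken from the patch:

/*
 * Worked example, assuming a physically contiguous DMA mapping for which
 * ib_umem_find_best_pgsz() selects a 2 MiB block size:
 *   umem length          = 2 MiB
 *   *num_of_mtts         = ib_umem_num_dma_blocks(umem, 2 MiB) = 1
 *   return value (shift) = order_base_2(2 MiB) = 21
 * so a single MTT entry covers the whole MR.  A fragmented mapping falls
 * back toward 4 KiB blocks (shift 12); a mapping the supported mask cannot
 * describe makes the helper return -EOPNOTSUPP, which the callers below
 * now check.
 */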

View File

@@ -87,286 +87,20 @@ err_free:
return ERR_PTR(err);
}
enum {
MLX4_MAX_MTT_SHIFT = 31
};
static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
struct mlx4_mtt *mtt,
u64 mtt_size, u64 mtt_shift, u64 len,
u64 cur_start_addr, u64 *pages,
int *start_index, int *npages)
{
u64 cur_end_addr = cur_start_addr + len;
u64 cur_end_addr_aligned = 0;
u64 mtt_entries;
int err = 0;
int k;
len += (cur_start_addr & (mtt_size - 1ULL));
cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
len += (cur_end_addr_aligned - cur_end_addr);
if (len & (mtt_size - 1ULL)) {
pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
len, mtt_size);
return -EINVAL;
}
mtt_entries = (len >> mtt_shift);
/*
* Align the MTT start address to the mtt_size.
* Required to handle cases when the MR starts in the middle of an MTT
* record. Was not required in old code since the physical addresses
* provided by the dma subsystem were page aligned, which was also the
* MTT size.
*/
cur_start_addr = round_down(cur_start_addr, mtt_size);
/* A new block is started ... */
for (k = 0; k < mtt_entries; ++k) {
pages[*npages] = cur_start_addr + (mtt_size * k);
(*npages)++;
/*
* Be friendly to mlx4_write_mtt() and pass it chunks of
* appropriate size.
*/
if (*npages == PAGE_SIZE / sizeof(u64)) {
err = mlx4_write_mtt(dev->dev, mtt, *start_index,
*npages, pages);
if (err)
return err;
(*start_index) += *npages;
*npages = 0;
}
}
return 0;
}
static inline u64 alignment_of(u64 ptr)
{
return ilog2(ptr & (~(ptr - 1)));
}
static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
u64 current_block_end,
u64 block_shift)
{
/* Check whether the alignment of the new block is aligned as well as
* the previous block.
* Block address must start with zeros till size of entity_size.
*/
if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
/*
* It is not as well aligned as the previous block-reduce the
* mtt size accordingly. Here we take the last right bit which
* is 1.
*/
block_shift = alignment_of(next_block_start);
/*
* Check whether the alignment of the end of previous block - is it
* aligned as well as the start of the block
*/
if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
/*
* It is not as well aligned as the start of the block -
* reduce the mtt size accordingly.
*/
block_shift = alignment_of(current_block_end);
return block_shift;
}
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
u64 *pages;
u64 len = 0;
int err = 0;
u64 mtt_size;
u64 cur_start_addr = 0;
u64 mtt_shift;
int start_index = 0;
int npages = 0;
struct scatterlist *sg;
int i;
struct ib_block_iter biter;
int err, i = 0;
u64 addr;
pages = (u64 *) __get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
mtt_shift = mtt->page_shift;
mtt_size = 1ULL << mtt_shift;
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
if (cur_start_addr + len == sg_dma_address(sg)) {
/* still the same block */
len += sg_dma_len(sg);
continue;
}
/*
* A new block is started ...
* If len is malaligned, write an extra mtt entry to cover the
* misaligned area (round up the division)
*/
err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
mtt_shift, len,
cur_start_addr,
pages, &start_index,
&npages);
rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
addr = rdma_block_iter_dma_address(&biter);
err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
if (err)
goto out;
cur_start_addr = sg_dma_address(sg);
len = sg_dma_len(sg);
return err;
}
/* Handle the last block */
if (len > 0) {
/*
* If len is malaligned, write an extra mtt entry to cover
* the misaligned area (round up the division)
*/
err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
mtt_shift, len,
cur_start_addr, pages,
&start_index, &npages);
if (err)
goto out;
}
if (npages)
err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
out:
free_page((unsigned long) pages);
return err;
}
/*
* Calculate optimal mtt size based on contiguous pages.
* Function will return also the number of pages that are not aligned to the
* calculated mtt_size to be added to total number of pages. For that we should
* check the first chunk length & last chunk length and if not aligned to
* mtt_size we should increment the non_aligned_pages number. All chunks in the
* middle already handled as part of mtt shift calculation for both their start
* & end addresses.
*/
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
int *num_of_mtts)
{
u64 block_shift = MLX4_MAX_MTT_SHIFT;
u64 min_shift = PAGE_SHIFT;
u64 last_block_aligned_end = 0;
u64 current_block_start = 0;
u64 first_block_start = 0;
u64 current_block_len = 0;
u64 last_block_end = 0;
struct scatterlist *sg;
u64 current_block_end;
u64 misalignment_bits;
u64 next_block_start;
u64 total_len = 0;
int i;
*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
/*
* Initialization - save the first chunk start as the
* current_block_start - block means contiguous pages.
*/
if (current_block_len == 0 && current_block_start == 0) {
current_block_start = sg_dma_address(sg);
first_block_start = current_block_start;
/*
* Find the bits that are different between the physical
* address and the virtual address for the start of the
* MR.
* umem_get aligned the start_va to a page boundary.
* Therefore, we need to align the start va to the same
* boundary.
* misalignment_bits is needed to handle the case of a
* single memory region. In this case, the rest of the
* logic will not reduce the block size. If we use a
* block size which is bigger than the alignment of the
* misalignment bits, we might use the virtual page
* number instead of the physical page number, resulting
* in access to the wrong data.
*/
misalignment_bits =
(start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
current_block_start;
block_shift = min(alignment_of(misalignment_bits),
block_shift);
}
/*
* Go over the scatter entries and check if they continue the
* previous scatter entry.
*/
next_block_start = sg_dma_address(sg);
current_block_end = current_block_start + current_block_len;
/* If we have a split (non-contig.) between two blocks */
if (current_block_end != next_block_start) {
block_shift = mlx4_ib_umem_calc_block_mtt
(next_block_start,
current_block_end,
block_shift);
/*
* If we reached the minimum shift for 4k page we stop
* the loop.
*/
if (block_shift <= min_shift)
goto end;
/*
* If not saved yet we are in first block - we save the
* length of first block to calculate the
* non_aligned_pages number at the end.
*/
total_len += current_block_len;
/* Start a new block */
current_block_start = next_block_start;
current_block_len = sg_dma_len(sg);
continue;
}
/* The scatter entry is another part of the current block,
* increase the block size.
* An entry in the scatter can be larger than 4k (page) as of
* dma mapping which merge some blocks together.
*/
current_block_len += sg_dma_len(sg);
}
/* Account for the last block in the total len */
total_len += current_block_len;
/* Add to the first block the misalignment that it suffers from. */
total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
last_block_end = current_block_start + current_block_len;
last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
total_len += (last_block_aligned_end - last_block_end);
if (total_len & ((1ULL << block_shift) - 1ULL))
pr_warn("misaligned total length detected (%llu, %llu)!",
total_len, block_shift);
*num_of_mtts = total_len >> block_shift;
end:
if (block_shift < min_shift) {
/*
* If shift is less than the min we set a warning and return the
* min shift.
*/
pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
block_shift = min_shift;
}
return block_shift;
return 0;
}
static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
@@ -424,6 +158,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
if (shift < 0) {
err = shift;
goto err_umem;
}
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);

View File

@@ -925,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (shift < 0) {
err = shift;
goto err_buf;
}
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
@@ -1108,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (shift < 0) {
err = shift;
goto err_buf;
}
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;

View File

@@ -242,6 +242,10 @@ static int mlx5_netdev_event(struct notifier_block *this,
case NETDEV_DOWN: {
struct net_device *upper = NULL;
if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
!mlx5_core_mp_enabled(mdev))
return NOTIFY_DONE;
if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
struct net_device *lag_ndev;

View File

@@ -669,6 +669,12 @@ struct mlx5_ib_mkey {
#define mlx5_update_odp_stats(mr, counter_name, value) \
atomic64_add(value, &((mr)->odp_stats.counter_name))
#define mlx5_update_odp_stats_with_handled(mr, counter_name, value) \
do { \
mlx5_update_odp_stats(mr, counter_name, value); \
atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
} while (0)
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
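
A small usage note on the new macro above, consistent with how the ODP fault path later in this series uses it: the value argument counts pages while the _handled counter counts events.

/*
 * Example: one page fault that resolves 16 pages
 *   mlx5_update_odp_stats_with_handled(mr, faults, 16);
 *     odp_stats.faults         += 16   (pages mapped)
 *     odp_stats.faults_handled += 1    (fault events handled)
 * These feed the new page_faults_handled / page_invalidations_handled
 * per-MR stat counters exported further down in this series.
 */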

View File

@@ -2021,6 +2021,11 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
bool is_odp = is_odp_mr(mr);
int ret = 0;
if (is_odp)
mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
@@ -2032,7 +2037,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
return 0;
goto out;
}
if (ent) {
@@ -2041,7 +2046,15 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
mr->mmkey.cache_ent = NULL;
spin_unlock_irq(&ent->mkeys_queue.lock);
}
return destroy_mkey(dev, mr);
ret = destroy_mkey(dev, mr);
out:
if (is_odp) {
if (!ret)
to_ib_umem_odp(mr->umem)->private = NULL;
mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
}
return ret;
}
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
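
The net effect of the mlx5_revoke_mr() change is a small ownership protocol between MR teardown and the ODP invalidation callback shown in the next file: the MR pointer stashed in umem_odp->private is cleared only under umem_mutex, and the callback re-checks it under the same mutex. A minimal sketch of that pairing (the sketch_ names are hypothetical, not driver code):

static void sketch_revoke(struct ib_umem_odp *umem_odp)
{
        mutex_lock(&umem_odp->umem_mutex);
        /* ... destroy the HW mkey while invalidations are excluded ... */
        umem_odp->private = NULL;               /* publish "MR is gone" */
        mutex_unlock(&umem_odp->umem_mutex);
}

static void sketch_invalidate(struct ib_umem_odp *umem_odp)
{
        struct mlx5_ib_mr *mr;

        mutex_lock(&umem_odp->umem_mutex);
        mr = umem_odp->private;
        if (!mr) {              /* already revoked: nothing left to zap */
                mutex_unlock(&umem_odp->umem_mutex);
                return;
        }
        /* ... zap the MTTs for the invalidated range ... */
        mutex_unlock(&umem_odp->umem_mutex);
}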

View File

@@ -228,13 +228,27 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *imr = mr->parent;
/*
* If userspace is racing freeing the parent implicit ODP MR then we can
lose the race with parent destruction. In this case
* mlx5_ib_free_odp_mr() will free everything in the implicit_children
* xarray so NOP is fine. This child MR cannot be destroyed here because
* we are under its umem_mutex.
*/
if (!refcount_inc_not_zero(&imr->mmkey.usecount))
return;
xa_erase(&imr->implicit_children, idx);
xa_lock(&imr->implicit_children);
if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
mr) {
xa_unlock(&imr->implicit_children);
return;
}
if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
xa_erase(&mr_to_mdev(mr)->odp_mkeys,
mlx5_base_mkey(mr->mmkey.key));
__xa_erase(&mr_to_mdev(mr)->odp_mkeys,
mlx5_base_mkey(mr->mmkey.key));
xa_unlock(&imr->implicit_children);
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
@@ -268,6 +282,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
if (!umem_odp->npages)
goto out;
mr = umem_odp->private;
if (!mr)
goto out;
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
@@ -313,7 +329,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
mlx5_update_odp_stats(mr, invalidations, invalidations);
mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
/*
* We are now sure that the device will not access the
@@ -500,18 +516,18 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
refcount_inc(&ret->mmkey.usecount);
goto out_lock;
}
xa_unlock(&imr->implicit_children);
if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
&mr->mmkey, GFP_KERNEL);
ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
&mr->mmkey, GFP_KERNEL);
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
xa_erase(&imr->implicit_children, idx);
goto out_mr;
__xa_erase(&imr->implicit_children, idx);
goto out_lock;
}
mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
}
xa_unlock(&imr->implicit_children);
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
@@ -944,8 +960,7 @@ out:
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
* Returns number of OS pages retrieved on success. The caller may continue to
* the next data segment.
* Returns zero on success. The caller may continue to the next data segment.
* Can return the following error codes:
* -EAGAIN to designate a temporary error. The caller will abort handling the
* page fault and resolve it.
@@ -958,7 +973,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
u32 *bytes_committed,
u32 *bytes_mapped)
{
int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
struct pf_frame *head = NULL, *frame;
struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -993,13 +1008,20 @@ next_mr:
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
(io_virt & PAGE_MASK)) >>
PAGE_SHIFT;
ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
if (ret < 0)
goto end;
mlx5_update_odp_stats(mr, faults, ret);
mlx5_update_odp_stats_with_handled(mr, faults, ret);
if (ret < pages_in_range) {
ret = -EFAULT;
goto end;
}
npages += ret;
ret = 0;
break;
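
A short worked example of the new completeness check, assuming 4 KiB pages:

/*
 *   io_virt = 0x1ff0, bcnt = 0x20        -> access ends at 0x2010
 *   ALIGN(0x2010, PAGE_SIZE) = 0x3000
 *   io_virt & PAGE_MASK      = 0x1000
 *   pages_in_range           = (0x3000 - 0x1000) >> 12 = 2
 * If pagefault_mr() resolved fewer than 2 pages, the segment now fails
 * with -EFAULT instead of being reported upward as a partial success.
 */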
@@ -1090,7 +1112,7 @@ end:
kfree(out);
*bytes_committed = 0;
return ret ? ret : npages;
return ret;
}
/*
@@ -1109,8 +1131,7 @@ end:
* the committed bytes).
* @receive_queue: receive WQE end of sg list
*
* Returns the number of pages loaded if positive, zero for an empty WQE, or a
* negative error code.
* Returns zero for success or a negative error code.
*/
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
@@ -1118,7 +1139,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, bool receive_queue)
{
int ret = 0, npages = 0;
int ret = 0;
u64 io_virt;
__be32 key;
u32 byte_count;
@@ -1175,10 +1196,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
bytes_mapped);
if (ret < 0)
break;
npages += ret;
}
return ret < 0 ? ret : npages;
return ret;
}
/*
@@ -1414,12 +1434,6 @@ resolve_page_fault:
free_page((unsigned long)wqe_start);
}
static int pages_in_range(u64 address, u32 length)
{
return (ALIGN(address + length, PAGE_SIZE) -
(address & PAGE_MASK)) >> PAGE_SHIFT;
}
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{
@@ -1458,7 +1472,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
} else if (ret < 0 || pages_in_range(address, length) > ret) {
} else if (ret < 0) {
mlx5_ib_page_fault_resume(dev, pfault, 1);
if (ret != -ENOENT)
mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
@@ -1529,7 +1543,7 @@ static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
goto err;
}
mlx5_update_odp_stats(mr, faults, ret);
mlx5_update_odp_stats_with_handled(mr, faults, ret);
mlx5r_deref_odp_mkey(mmkey);
if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)

View File

@@ -95,10 +95,19 @@ static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
atomic64_read(&mr->odp_stats.faults)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(
msg, "page_faults_handled",
atomic64_read(&mr->odp_stats.faults_handled)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(
msg, "page_invalidations",
atomic64_read(&mr->odp_stats.invalidations)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(
msg, "page_invalidations_handled",
atomic64_read(&mr->odp_stats.invalidations_handled)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
atomic64_read(&mr->odp_stats.prefetch)))
goto err_table;

View File

@@ -214,8 +214,8 @@ static const struct attribute_group port_linkcontrol_group = {
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -241,7 +241,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -249,8 +249,8 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -274,9 +274,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
static struct bin_attribute *port_ccmgta_attributes[] = {
static const struct bin_attribute *const port_ccmgta_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL,
@@ -295,7 +295,7 @@ static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
static const struct attribute_group port_ccmgta_attribute_group = {
.name = "CCMgtA",
.is_bin_visible = qib_ccmgta_is_bin_visible,
.bin_attrs = port_ccmgta_attributes,
.bin_attrs_new = port_ccmgta_attributes,
};
/* Start sl2vl */

View File

@@ -151,34 +151,6 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
break;
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
if (!us_ibdev->ufdev->link_up &&
netif_carrier_ok(netdev)) {
usnic_fwd_carrier_up(us_ibdev->ufdev);
usnic_info("Link UP on %s\n",
dev_name(&us_ibdev->ib_dev.dev));
ib_event.event = IB_EVENT_PORT_ACTIVE;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
} else if (us_ibdev->ufdev->link_up &&
!netif_carrier_ok(netdev)) {
usnic_fwd_carrier_down(us_ibdev->ufdev);
usnic_info("Link DOWN on %s\n",
dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
} else {
usnic_dbg("Ignoring %s on %s\n",
netdev_cmd_to_name(event),
dev_name(&us_ibdev->ib_dev.dev));
}
break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
@@ -218,6 +190,50 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
mutex_unlock(&us_ibdev->usdev_lock);
}
static void usnic_ib_handle_port_event(struct ib_device *ibdev,
struct net_device *netdev,
unsigned long event)
{
struct usnic_ib_dev *us_ibdev =
container_of(ibdev, struct usnic_ib_dev, ib_dev);
struct ib_event ib_event;
mutex_lock(&us_ibdev->usdev_lock);
switch (event) {
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
if (!us_ibdev->ufdev->link_up &&
netif_carrier_ok(netdev)) {
usnic_fwd_carrier_up(us_ibdev->ufdev);
usnic_info("Link UP on %s\n",
dev_name(&us_ibdev->ib_dev.dev));
ib_event.event = IB_EVENT_PORT_ACTIVE;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
} else if (us_ibdev->ufdev->link_up &&
!netif_carrier_ok(netdev)) {
usnic_fwd_carrier_down(us_ibdev->ufdev);
usnic_info("Link DOWN on %s\n",
dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
} else {
usnic_dbg("Ignoring %s on %s\n",
netdev_cmd_to_name(event),
dev_name(&us_ibdev->ib_dev.dev));
}
break;
default:
break;
}
mutex_unlock(&us_ibdev->usdev_lock);
}
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
@@ -358,6 +374,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
.report_port_event = usnic_ib_handle_port_event,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),

View File

@@ -143,6 +143,46 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
enum ib_event_type event)
{
struct ib_event ib_event;
memset(&ib_event, 0, sizeof(ib_event));
ib_event.device = &dev->ib_dev;
ib_event.element.port_num = port;
ib_event.event = event;
ib_dispatch_event(&ib_event);
}
static void pvrdma_report_event_handle(struct ib_device *ibdev,
struct net_device *ndev,
unsigned long event)
{
struct pvrdma_dev *dev = container_of(ibdev, struct pvrdma_dev, ib_dev);
switch (event) {
case NETDEV_DOWN:
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
break;
case NETDEV_UP:
pvrdma_write_reg(dev, PVRDMA_REG_CTL,
PVRDMA_DEVICE_CTL_UNQUIESCE);
mb();
if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
dev_err(&dev->pdev->dev,
"failed to activate device during link up\n");
else
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
default:
break;
}
}
static const struct ib_device_ops pvrdma_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_VMW_PVRDMA,
@@ -181,6 +221,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
.report_port_event = pvrdma_report_event_handle,
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
@@ -362,18 +403,6 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
}
static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
enum ib_event_type event)
{
struct ib_event ib_event;
memset(&ib_event, 0, sizeof(ib_event));
ib_event.device = &dev->ib_dev;
ib_event.element.port_num = port;
ib_event.event = event;
ib_dispatch_event(&ib_event);
}
static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
@@ -666,21 +695,8 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
switch (event) {
case NETDEV_REBOOT:
case NETDEV_DOWN:
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
break;
case NETDEV_UP:
pvrdma_write_reg(dev, PVRDMA_REG_CTL,
PVRDMA_DEVICE_CTL_UNQUIESCE);
mb();
if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
dev_err(&dev->pdev->dev,
"failed to activate device during link up\n");
else
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_UNREGISTER:
ib_device_set_netdev(&dev->ib_dev, NULL, 1);
dev_put(dev->netdev);

View File

@@ -571,11 +571,6 @@ static void rxe_port_event(struct rxe_dev *rxe,
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
struct rxe_port *port;
port = &rxe->port;
port->attr.state = IB_PORT_ACTIVE;
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
dev_info(&rxe->ib_dev.dev, "set active\n");
}
@@ -583,11 +578,6 @@ void rxe_port_up(struct rxe_dev *rxe)
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
struct rxe_port *port;
port = &rxe->port;
port->attr.state = IB_PORT_DOWN;
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
dev_info(&rxe->ib_dev.dev, "set down\n");
@@ -601,7 +591,7 @@ void rxe_set_port_state(struct rxe_dev *rxe)
if (!ndev)
return;
if (netif_running(ndev) && netif_carrier_ok(ndev))
if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
rxe_port_up(rxe);
else
rxe_port_down(rxe);
@@ -623,18 +613,14 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_UNREGISTER:
ib_unregister_device_queued(&rxe->ib_dev);
break;
case NETDEV_UP:
rxe_port_up(rxe);
break;
case NETDEV_DOWN:
rxe_port_down(rxe);
break;
case NETDEV_CHANGEMTU:
rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
case NETDEV_DOWN:
case NETDEV_CHANGE:
rxe_set_port_state(rxe);
if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
break;
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:

View File

@@ -129,7 +129,7 @@ enum rxe_device_param {
enum rxe_port_param {
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
RXE_PORT_MAX_MSG_SZ = 0x800000,
RXE_PORT_MAX_MSG_SZ = (1UL << 31),
RXE_PORT_BAD_PKEY_CNTR = 0,
RXE_PORT_QKEY_VIOL_CNTR = 0,
RXE_PORT_LID = 0,

View File

@@ -178,7 +178,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
{
struct rxe_pool *pool = elem->pool;
struct xarray *xa = &pool->xa;
static int timeout = RXE_POOL_TIMEOUT;
int ret, err = 0;
void *xa_ret;
@@ -202,19 +201,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
* return to rdma-core
*/
if (sleepable) {
if (!completion_done(&elem->complete) && timeout) {
if (!completion_done(&elem->complete)) {
ret = wait_for_completion_timeout(&elem->complete,
timeout);
msecs_to_jiffies(50000));
/* Shouldn't happen. There are still references to
* the object but, rather than deadlock, free the
* object or pass back to rdma-core.
*/
if (WARN_ON(!ret))
err = -EINVAL;
err = -ETIMEDOUT;
}
} else {
unsigned long until = jiffies + timeout;
unsigned long until = jiffies + RXE_POOL_TIMEOUT;
/* AH objects are unique in that the destroy_ah verb
* can be called in atomic context. This delay
@@ -226,7 +225,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
mdelay(1);
if (WARN_ON(!completion_done(&elem->complete)))
err = -EINVAL;
err = -ETIMEDOUT;
}
if (pool->cleanup)

View File

@@ -62,6 +62,7 @@ static int rxe_query_port(struct ib_device *ibdev,
ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
&attr->active_width);
attr->state = ib_get_curr_port_state(ndev);
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(ndev) & IFF_UP)
@@ -696,7 +697,7 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
for (i = 0; i < ibwr->num_sge; i++)
length += ibwr->sg_list[i].length;
if (length > (1UL << 31)) {
if (length > RXE_PORT_MAX_MSG_SZ) {
rxe_err_qp(qp, "message length too long\n");
break;
}
@@ -980,8 +981,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
/* IBA max message size is 2^31 */
if (length >= (1UL<<31)) {
if (length > RXE_PORT_MAX_MSG_SZ) {
err = -EINVAL;
rxe_dbg("message length too long\n");
goto err_out;

View File

@@ -379,14 +379,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
sdev = to_siw_dev(base_dev);
switch (event) {
case NETDEV_UP:
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_DOWN:
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
break;
case NETDEV_REGISTER:
/*
* Device registration now handled only by

View File

@@ -189,10 +189,9 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
attr->state = ib_get_curr_port_state(ndev);
attr->phys_state = attr->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
/*
* All zero

View File

@@ -584,6 +584,9 @@ static void dev_free(struct kref *ref)
list_del(&dev->entry);
mutex_unlock(&pool->mutex);
if (pool->ops && pool->ops->deinit)
pool->ops->deinit(dev);
ib_dealloc_pd(dev->ib_pd);
kfree(dev);
}

View File

@@ -3978,7 +3978,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
return host;
put_host:
device_del(&host->dev);
put_device(&host->dev);
return NULL;
}

View File

@@ -2854,6 +2854,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
bnxt_ulp_async_events(bp, cmpl);
return 0;
}

View File

@@ -298,6 +298,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
bool reset = false;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
@@ -311,7 +312,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
ops->ulp_irq_stop(ulp->handle);
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -346,9 +349,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
int bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap,
u16 max_id)
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
struct bnxt_ulp *ulp;
if (!bnxt_ulp_registered(edev))
return;
ulp = edev->ulp_tbl;
rcu_read_lock();
ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_async_notifier)
goto exit_unlock_rcu;
if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
goto exit_unlock_rcu;
/* Read max_async_event_id first before testing the bitmap. */
smp_rmb();
if (test_bit(event_id, ulp->async_events_bmap))
ops->ulp_async_notifier(ulp->handle, cmpl);
exit_unlock_rcu:
rcu_read_unlock();
}
void bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -360,7 +390,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
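
The smp_rmb() in bnxt_ulp_async_events() pairs with the existing smp_wmb() in bnxt_register_async_events(); summarized below (illustration only, not new code):

/*
 *   bnxt_register_async_events()        bnxt_ulp_async_events()
 *   ----------------------------        -----------------------
 *   set bits in async_events_bmap       read max_async_event_id
 *   smp_wmb()                           smp_rmb()
 *   max_async_event_id = max_id         test_bit(event_id, async_events_bmap)
 *
 * A reader that observes the updated max_async_event_id is guaranteed to
 * also observe the bitmap bits written before the smp_wmb(), so an event
 * can never pass the range check yet miss its bitmap bit.
 */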

View File

@@ -30,7 +30,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
void (*ulp_irq_stop)(void *);
/* async_notifier() cannot sleep (in BH context) */
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -126,6 +128,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
int bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap, u16 max_id);
void bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap, u16 max_id);
#endif

View File

@@ -63,22 +63,6 @@ int ib_find_cached_pkey(struct ib_device *device,
u16 pkey,
u16 *index);
/**
* ib_find_exact_cached_pkey - Returns the PKey table index where a specified
* PKey value occurs. Comparison uses the FULL 16 bits (incl membership bit)
* @device: The device to query.
* @port_num: The port number of the device to search for the PKey.
* @pkey: The PKey value to search for.
* @index: The index into the cached PKey table where the PKey was found.
*
* ib_find_exact_cached_pkey() searches the specified PKey table in
* the local software cache.
*/
int ib_find_exact_cached_pkey(struct ib_device *device,
u32 port_num,
u16 pkey,
u16 *index);
/**
* ib_get_cached_lmc - Returns a cached lmc table entry
* @device: The device to query.

View File

@@ -22,7 +22,4 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
struct sa_path_rec *src);
void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
struct ib_user_path_rec *src);
#endif /* IB_USER_MARSHALL_H */

View File

@@ -283,7 +283,4 @@ int ib_ud_header_init(int payload_bytes,
int ib_ud_header_pack(struct ib_ud_header *header,
void *buf);
int ib_ud_header_unpack(void *buf,
struct ib_ud_header *header);
#endif /* IB_PACK_H */

View File

@@ -59,9 +59,6 @@ extern struct workqueue_struct *ib_comp_unbound_wq;
struct ib_ucq_object;
__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
@@ -2177,6 +2174,7 @@ struct ib_port_cache {
struct ib_gid_table *gid;
u8 lmc;
enum ib_port_state port_state;
enum ib_port_state last_port_state;
};
struct ib_port_immutable {
@@ -2256,7 +2254,9 @@ struct rdma_netdev_alloc_params {
struct ib_odp_counters {
atomic64_t faults;
atomic64_t faults_handled;
atomic64_t invalidations;
atomic64_t invalidations_handled;
atomic64_t prefetch;
};
@@ -2681,6 +2681,13 @@ struct ib_device_ops {
*/
void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
/**
* report_port_event - Drivers need to implement this if they have
* some private stuff to handle when link status changes.
*/
void (*report_port_event)(struct ib_device *ibdev,
struct net_device *ndev, unsigned long event);
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
@@ -4469,6 +4476,17 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
u32 port);
int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
u32 *port);
static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
{
return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
}
void ib_dispatch_port_state_event(struct ib_device *ibdev,
struct net_device *ndev);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
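
Putting the new core pieces together, a driver that registers its netdev with ib_device_set_netdev() can drop its private notifier handling for port state and rely on the callback instead; a hedged sketch, with all example_ names hypothetical:

static void example_report_port_event(struct ib_device *ibdev,
                                      struct net_device *ndev,
                                      unsigned long event)
{
        /* The core has already matched ndev to this device. */
        if (event == NETDEV_UP || event == NETDEV_DOWN || event == NETDEV_CHANGE)
                ib_dispatch_port_state_event(ibdev, ndev);
}

static const struct ib_device_ops example_dev_ops = {
        /* ... the usual verbs ... */
        .report_port_event = example_report_port_event,
};

/* At probe time, assuming a single-port RoCE device:
 *      ib_device_set_netdev(&dev->ib_dev, ndev, 1);
 * query_port() can then report ib_get_curr_port_state(ndev) directly,
 * as the rxe and siw conversions above do.
 */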