RDMA/ionic: Register device ops for control path

Implement the device-supported verb APIs for the control path.

Co-developed-by: Andrew Boyer <andrew.boyer@amd.com>
Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
Co-developed-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Abhijit Gangurde <abhijit.gangurde@amd.com>
Link: https://patch.msgid.link/20250903061606.4139957-11-abhijit.gangurde@amd.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
Abhijit Gangurde 2025-09-03 11:46:02 +05:30 committed by Leon Romanovsky
parent f3bdbd4270
commit e8521822c7
7 changed files with 3741 additions and 9 deletions

View file

@ -627,6 +627,44 @@ static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
return ERR_PTR(rc);
}
/*
 * Flush all queue pairs, then notify all completion queues, after the
 * device has been killed.
 *
 * A reference is taken on each qp/cq while it sits on the local flush
 * list, so the object cannot be freed between dropping the table lock
 * and flushing it.  Runs with interrupts disabled (asserted below);
 * NOTE(review): presumably no new qp/cq can be inserted into the tables
 * at this point -- confirm against the caller (ionic_kill_ibdev).
 */
static void ionic_flush_qs(struct ionic_ibdev *dev)
{
	struct ionic_qp *qp, *qp_tmp;
	struct ionic_cq *cq, *cq_tmp;
	LIST_HEAD(flush_list);
	unsigned long index;

	WARN_ON(!irqs_disabled());

	/* Flush qp send and recv */
	xa_lock(&dev->qp_tbl);
	xa_for_each(&dev->qp_tbl, index, qp) {
		/* hold the qp until it has been flushed */
		kref_get(&qp->qp_kref);
		list_add_tail(&qp->ibkill_flush_ent, &flush_list);
	}
	xa_unlock(&dev->qp_tbl);

	list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) {
		ionic_flush_qp(dev, qp);
		kref_put(&qp->qp_kref, ionic_qp_complete);
		list_del(&qp->ibkill_flush_ent);
	}

	/* Notify completions */
	xa_lock(&dev->cq_tbl);
	xa_for_each(&dev->cq_tbl, index, cq) {
		/* hold the cq until it has been notified */
		kref_get(&cq->cq_kref);
		list_add_tail(&cq->ibkill_flush_ent, &flush_list);
	}
	xa_unlock(&dev->cq_tbl);

	list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) {
		ionic_notify_flush_cq(cq);
		kref_put(&cq->cq_kref, ionic_cq_complete);
		list_del(&cq->ibkill_flush_ent);
	}
}
static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
{
unsigned long irqflags;
@ -650,6 +688,9 @@ static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
spin_unlock(&aq->lock);
}
if (do_flush)
ionic_flush_qs(dev);
local_irq_restore(irqflags);
/* Post a fatal event if requested */
@ -789,6 +830,65 @@ static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
kref_put(&cq->cq_kref, ionic_cq_complete);
}
/*
 * Dispatch an async event from the event queue for a queue pair.
 *
 * Looks up the qp by id under the table lock, takes a reference so the
 * qp outlives the callback, translates the device event code to an IB
 * event, and invokes the consumer's event handler if one is registered.
 * Unknown qpids and unrecognized codes are logged and ignored.
 */
static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code)
{
	unsigned long irqflags;
	struct ib_event ibev;
	struct ionic_qp *qp;

	xa_lock_irqsave(&dev->qp_tbl, irqflags);
	qp = xa_load(&dev->qp_tbl, qpid);
	if (qp)
		/* hold the qp across the event handler call */
		kref_get(&qp->qp_kref);
	xa_unlock_irqrestore(&dev->qp_tbl, irqflags);

	if (!qp) {
		ibdev_dbg(&dev->ibdev,
			  "missing qpid %#x code %u\n", qpid, code);
		return;
	}

	ibev.device = &dev->ibdev;
	ibev.element.qp = &qp->ibqp;

	/* translate device event code to IB event type */
	switch (code) {
	case IONIC_V1_EQE_SQ_DRAIN:
		ibev.event = IB_EVENT_SQ_DRAINED;
		break;

	case IONIC_V1_EQE_QP_COMM_EST:
		ibev.event = IB_EVENT_COMM_EST;
		break;

	case IONIC_V1_EQE_QP_LAST_WQE:
		ibev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;

	case IONIC_V1_EQE_QP_ERR:
		ibev.event = IB_EVENT_QP_FATAL;
		break;

	case IONIC_V1_EQE_QP_ERR_REQUEST:
		ibev.event = IB_EVENT_QP_REQ_ERR;
		break;

	case IONIC_V1_EQE_QP_ERR_ACCESS:
		ibev.event = IB_EVENT_QP_ACCESS_ERR;
		break;

	default:
		ibdev_dbg(&dev->ibdev,
			  "unrecognized qpid %#x code %u\n", qpid, code);
		goto out;
	}

	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context);

out:
	kref_put(&qp->qp_kref, ionic_qp_complete);
}
static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
{
struct ionic_ibdev *dev = eq->dev;
@ -818,6 +918,10 @@ static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
ionic_cq_event(dev, qid, code);
break;
case IONIC_V1_EQE_TYPE_QP:
ionic_qp_event(dev, qid, code);
break;
default:
ibdev_dbg(&dev->ibdev,
"unknown event %#x type %u\n", evt, type);

File diff suppressed because it is too large Load diff

View file

@ -5,6 +5,266 @@
#define _IONIC_FW_H_
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>
/* common for ib spec */

/* bytes reserved at the end of a wqe when expdb is enabled
 * (see the ionic_v1_*_wqe_max_* helpers below)
 */
#define IONIC_EXP_DBELL_SZ		8

enum ionic_mrid_bits {
	/* low byte of an lkey/rkey is the key; the rest is the mr index */
	IONIC_MRID_INDEX_SHIFT		= 8,
};
/* Compose an lkey/rkey from the mr table index and the key byte. */
static inline u32 ionic_mrid(u32 index, u8 key)
{
	u32 lrkey = index << IONIC_MRID_INDEX_SHIFT;

	return lrkey | key;
}
/* Recover the mr table index from an lkey/rkey (drops the key byte). */
static inline u32 ionic_mrid_index(u32 lrkey)
{
	return lrkey >> IONIC_MRID_INDEX_SHIFT;
}
/* common to all versions */

/* wqe scatter gather element */
struct ionic_sge {
	__be64 va;	/* buffer address */
	__be32 len;	/* buffer length in bytes */
	__be32 lkey;	/* local key of the mr covering the buffer */
};
/* admin queue mr type */
enum ionic_mr_flags {
	/* bits that determine mr access (low 12 bits, see ACCESS_MASK) */
	IONIC_MRF_LOCAL_WRITE		= BIT(0),
	IONIC_MRF_REMOTE_WRITE		= BIT(1),
	IONIC_MRF_REMOTE_READ		= BIT(2),
	IONIC_MRF_REMOTE_ATOMIC	= BIT(3),
	IONIC_MRF_MW_BIND		= BIT(4),
	IONIC_MRF_ZERO_BASED		= BIT(5),
	IONIC_MRF_ON_DEMAND		= BIT(6),
	IONIC_MRF_PB			= BIT(7),
	IONIC_MRF_ACCESS_MASK		= BIT(12) - 1,

	/* bits that determine mr type */
	IONIC_MRF_UKEY_EN		= BIT(13),
	IONIC_MRF_IS_MW		= BIT(14),
	IONIC_MRF_INV_EN		= BIT(15),

	/* base flags combinations for mr types */
	IONIC_MRF_USER_MR		= 0,
	IONIC_MRF_PHYS_MR		= (IONIC_MRF_UKEY_EN |
					   IONIC_MRF_INV_EN),
	IONIC_MRF_MW_1			= (IONIC_MRF_UKEY_EN |
					   IONIC_MRF_IS_MW),
	IONIC_MRF_MW_2			= (IONIC_MRF_UKEY_EN |
					   IONIC_MRF_IS_MW |
					   IONIC_MRF_INV_EN),
};
/* Translate IB spec mr access flags to device mr flags. */
static inline int to_ionic_mr_flags(int access)
{
	return (access & IB_ACCESS_LOCAL_WRITE ?
			IONIC_MRF_LOCAL_WRITE : 0) |
	       (access & IB_ACCESS_REMOTE_READ ?
			IONIC_MRF_REMOTE_READ : 0) |
	       (access & IB_ACCESS_REMOTE_WRITE ?
			IONIC_MRF_REMOTE_WRITE : 0) |
	       (access & IB_ACCESS_REMOTE_ATOMIC ?
			IONIC_MRF_REMOTE_ATOMIC : 0) |
	       (access & IB_ACCESS_MW_BIND ?
			IONIC_MRF_MW_BIND : 0) |
	       (access & IB_ZERO_BASED ?
			IONIC_MRF_ZERO_BASED : 0);
}
/* qp behavior and access flags in device format */
enum ionic_qp_flags {
	/* bits that determine qp access */
	IONIC_QPF_REMOTE_WRITE		= BIT(0),
	IONIC_QPF_REMOTE_READ		= BIT(1),
	IONIC_QPF_REMOTE_ATOMIC	= BIT(2),

	/* bits that determine other qp behavior */
	IONIC_QPF_SQ_PB		= BIT(6),
	IONIC_QPF_RQ_PB		= BIT(7),
	IONIC_QPF_SQ_SPEC		= BIT(8),
	IONIC_QPF_RQ_SPEC		= BIT(9),
	IONIC_QPF_REMOTE_PRIVILEGED	= BIT(10),
	IONIC_QPF_SQ_DRAINING		= BIT(11),
	IONIC_QPF_SQD_NOTIFY		= BIT(12),
	IONIC_QPF_SQ_CMB		= BIT(13),
	IONIC_QPF_RQ_CMB		= BIT(14),
	IONIC_QPF_PRIVILEGED		= BIT(15),
};
/* Translate device qp access flags back to IB spec access flags. */
static inline int from_ionic_qp_flags(int flags)
{
	return (flags & IONIC_QPF_REMOTE_WRITE ?
			IB_ACCESS_REMOTE_WRITE : 0) |
	       (flags & IONIC_QPF_REMOTE_READ ?
			IB_ACCESS_REMOTE_READ : 0) |
	       (flags & IONIC_QPF_REMOTE_ATOMIC ?
			IB_ACCESS_REMOTE_ATOMIC : 0);
}
/*
 * Build device qp flags from IB spec access flags plus the individual
 * behavior selections.  Inverse of from_ionic_qp_flags for the access
 * bits.
 */
static inline int to_ionic_qp_flags(int access, bool sqd_notify,
				    bool sq_is_cmb, bool rq_is_cmb,
				    bool sq_spec, bool rq_spec,
				    bool privileged, bool remote_privileged)
{
	int qpf;

	qpf = (access & IB_ACCESS_REMOTE_WRITE ?
			IONIC_QPF_REMOTE_WRITE : 0) |
	      (access & IB_ACCESS_REMOTE_READ ?
			IONIC_QPF_REMOTE_READ : 0) |
	      (access & IB_ACCESS_REMOTE_ATOMIC ?
			IONIC_QPF_REMOTE_ATOMIC : 0);

	if (sqd_notify)
		qpf |= IONIC_QPF_SQD_NOTIFY;

	if (sq_is_cmb)
		qpf |= IONIC_QPF_SQ_CMB;

	if (rq_is_cmb)
		qpf |= IONIC_QPF_RQ_CMB;

	if (sq_spec)
		qpf |= IONIC_QPF_SQ_SPEC;

	if (rq_spec)
		qpf |= IONIC_QPF_RQ_SPEC;

	if (privileged)
		qpf |= IONIC_QPF_PRIVILEGED;

	if (remote_privileged)
		qpf |= IONIC_QPF_REMOTE_PRIVILEGED;

	return qpf;
}
/* admin queue qp type */
enum ionic_qp_type {
	IONIC_QPT_RC,
	IONIC_QPT_UC,
	IONIC_QPT_RD,
	IONIC_QPT_UD,
	IONIC_QPT_SRQ,
	IONIC_QPT_XRC_INI,
	IONIC_QPT_XRC_TGT,
	IONIC_QPT_XRC_SRQ,
};
/*
 * Translate an IB spec qp type to the device qp type, or -EINVAL for
 * types the device does not support.  GSI qps use the UD type.
 */
static inline int to_ionic_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
	case IB_QPT_UD:
		return IONIC_QPT_UD;
	case IB_QPT_RC:
		return IONIC_QPT_RC;
	case IB_QPT_UC:
		return IONIC_QPT_UC;
	case IB_QPT_XRC_INI:
		return IONIC_QPT_XRC_INI;
	case IB_QPT_XRC_TGT:
		return IONIC_QPT_XRC_TGT;
	default:
		return -EINVAL;
	}
}
/* admin queue qp state (device encoding of the IB qp state machine) */
enum ionic_qp_state {
	IONIC_QPS_RESET,
	IONIC_QPS_INIT,
	IONIC_QPS_RTR,
	IONIC_QPS_RTS,
	IONIC_QPS_SQD,
	IONIC_QPS_SQE,
	IONIC_QPS_ERR,
};
/*
 * Translate a device qp state to the IB spec qp state, or -EINVAL for
 * an unrecognized device state.
 */
static inline int from_ionic_qp_state(enum ionic_qp_state state)
{
	switch (state) {
	case IONIC_QPS_RESET:
		return IB_QPS_RESET;
	case IONIC_QPS_INIT:
		return IB_QPS_INIT;
	case IONIC_QPS_RTR:
		return IB_QPS_RTR;
	case IONIC_QPS_RTS:
		return IB_QPS_RTS;
	case IONIC_QPS_SQD:
		return IB_QPS_SQD;
	case IONIC_QPS_SQE:
		return IB_QPS_SQE;
	case IONIC_QPS_ERR:
		return IB_QPS_ERR;
	default:
		return -EINVAL;
	}
}
/*
 * Translate an IB spec qp state to the device qp state.
 *
 * NOTE(review): unlike from_ionic_qp_state, the default case returns 0
 * (IONIC_QPS_RESET) rather than an error -- presumably deliberate so an
 * unsupported state maps to reset; confirm with callers.
 */
static inline int to_ionic_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return IONIC_QPS_RESET;
	case IB_QPS_INIT:
		return IONIC_QPS_INIT;
	case IB_QPS_RTR:
		return IONIC_QPS_RTR;
	case IB_QPS_RTS:
		return IONIC_QPS_RTS;
	case IB_QPS_SQD:
		return IONIC_QPS_SQD;
	case IB_QPS_SQE:
		return IONIC_QPS_SQE;
	case IB_QPS_ERR:
		return IONIC_QPS_ERR;
	default:
		return 0;
	}
}
/*
 * Pack a state transition for the modify qp command: destination state
 * in the low nibble, current state in the next nibble.
 */
static inline int to_ionic_qp_modify_state(enum ib_qp_state to_state,
					   enum ib_qp_state from_state)
{
	int to = to_ionic_qp_state(to_state);
	int from = to_ionic_qp_state(from_state);

	return to | (from << 4);
}
/* fw abi v1 */

/* data payload part of v1 wqe; the same 32 bytes may hold a short sgl,
 * spec-format sgl entries, or inline data
 */
union ionic_v1_pld {
	struct ionic_sge sgl[2];
	__be32 spec32[8];
	__be16 spec16[16];
	__u8 data[32];
};
/* completion queue v1 cqe */
struct ionic_v1_cqe {
@ -78,6 +338,390 @@ static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
return qtf >> IONIC_V1_CQE_QID_SHIFT;
}
/* v1 base wqe header */
struct ionic_v1_base_hdr {
	__u64 wqe_id;		/* caller's work request id */
	__u8 op;		/* enum ionic_v1_op */
	__u8 num_sge_key;
	__be16 flags;		/* IONIC_V1_FLAG_* */
	__be32 imm_data_key;	/* immediate data or invalidate key */
};

/* v1 receive wqe body */
struct ionic_v1_recv_bdy {
	__u8 rsvd[16];
	union ionic_v1_pld pld;
};

/* v1 send/rdma wqe body (common, has sgl) */
struct ionic_v1_common_bdy {
	union {
		struct {
			__be32 ah_id;
			__be32 dest_qpn;
			__be32 dest_qkey;
		} send;
		struct {
			__be32 remote_va_high;
			__be32 remote_va_low;
			__be32 remote_rkey;
		} rdma;
	};
	__be32 length;
	union ionic_v1_pld pld;
};

/* v1 atomic wqe body */
struct ionic_v1_atomic_bdy {
	__be32 remote_va_high;
	__be32 remote_va_low;
	__be32 remote_rkey;
	__be32 swap_add_high;
	__be32 swap_add_low;
	__be32 compare_high;
	__be32 compare_low;
	__u8 rsvd[4];
	struct ionic_sge sge;	/* single local buffer for the result */
};

/* v1 reg mr wqe body */
struct ionic_v1_reg_mr_bdy {
	__be64 va;
	__be64 length;
	__be64 offset;
	__be64 dma_addr;
	__be32 map_count;
	__be16 flags;		/* IONIC_MRF_* */
	__u8 dir_size_log2;
	__u8 page_size_log2;
	__u8 rsvd[8];
};

/* v1 bind mw wqe body */
struct ionic_v1_bind_mw_bdy {
	__be64 va;
	__be64 length;
	__be32 lkey;
	__be16 flags;		/* IONIC_MRF_* */
	__u8 rsvd[26];
};

/* v1 send/recv wqe: fixed header plus an op-specific body */
struct ionic_v1_wqe {
	struct ionic_v1_base_hdr base;
	union {
		struct ionic_v1_recv_bdy recv;
		struct ionic_v1_common_bdy common;
		struct ionic_v1_atomic_bdy atomic;
		struct ionic_v1_reg_mr_bdy reg_mr;
		struct ionic_v1_bind_mw_bdy bind_mw;
	};
};
/* queue pair v1 send opcodes; the wqe flag bits share this namespace */
enum ionic_v1_op {
	IONIC_V1_OP_SEND,
	IONIC_V1_OP_SEND_INV,
	IONIC_V1_OP_SEND_IMM,
	IONIC_V1_OP_RDMA_READ,
	IONIC_V1_OP_RDMA_WRITE,
	IONIC_V1_OP_RDMA_WRITE_IMM,
	IONIC_V1_OP_ATOMIC_CS,
	IONIC_V1_OP_ATOMIC_FA,
	IONIC_V1_OP_REG_MR,
	IONIC_V1_OP_LOCAL_INV,
	IONIC_V1_OP_BIND_MW,

	/* flags */
	IONIC_V1_FLAG_FENCE		= BIT(0),
	IONIC_V1_FLAG_SOL		= BIT(1),
	IONIC_V1_FLAG_INL		= BIT(2),
	IONIC_V1_FLAG_SIG		= BIT(3),

	/* flags last four bits for sgl spec format */
	IONIC_V1_FLAG_SPEC32		= (1u << 12),
	IONIC_V1_FLAG_SPEC16		= (2u << 12),
	IONIC_V1_SPEC_FIRST_SGE	= 2,
};
/*
 * Minimum wqe size (stride) needed for a send wqe to carry min_sge
 * sges or min_data bytes of inline data, whichever is larger.
 *
 * The spec sgl format reserves the first IONIC_V1_SPEC_FIRST_SGE sgl
 * slots; expdb reserves one trailing sge slot and IONIC_EXP_DBELL_SZ
 * bytes of the data area.
 */
static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data,
						int spec, bool expdb)
{
	size_t sz_wqe, sz_sgl, sz_data;

	if (spec > IONIC_V1_SPEC_FIRST_SGE)
		min_sge += IONIC_V1_SPEC_FIRST_SGE;

	if (expdb) {
		min_sge += 1;
		min_data += IONIC_EXP_DBELL_SZ;
	}

	sz_wqe = sizeof(struct ionic_v1_wqe);
	sz_sgl = offsetof(struct ionic_v1_wqe, common.pld.sgl[min_sge]);
	sz_data = offsetof(struct ionic_v1_wqe, common.pld.data[min_data]);

	if (sz_sgl > sz_wqe)
		sz_wqe = sz_sgl;

	if (sz_data > sz_wqe)
		sz_wqe = sz_data;

	return sz_wqe;
}
/*
 * Max number of sges that fit in the sgl of a send wqe with the given
 * stride.  Expdb consumes one sge-sized slot at the end of the wqe;
 * when the spec format is in use, the first IONIC_V1_SPEC_FIRST_SGE
 * sgl slots are reserved and the result is clamped to spec.
 *
 * Computed with offsetof arithmetic, as in ionic_v1_send_wqe_min_size,
 * instead of subtracting pointers synthesized from a null pointer,
 * which is undefined behavior in C.
 */
static inline int ionic_v1_send_wqe_max_sge(u8 stride_log2, int spec,
					    bool expdb)
{
	ssize_t bytes = (ssize_t)(1ull << stride_log2);
	int first_sge = 0;
	int num_sge;

	if (expdb)
		bytes -= sizeof(struct ionic_sge);

	if (spec > IONIC_V1_SPEC_FIRST_SGE)
		first_sge = IONIC_V1_SPEC_FIRST_SGE;

	num_sge = (bytes - (ssize_t)offsetof(struct ionic_v1_wqe,
					     common.pld.sgl[first_sge])) /
		(ssize_t)sizeof(struct ionic_sge);

	if (spec && num_sge > spec)
		num_sge = spec;

	return num_sge;
}
/*
 * Max bytes of inline data that fit in a send wqe with the given
 * stride.  Expdb reserves IONIC_EXP_DBELL_SZ bytes at the end.
 *
 * Computed with offsetof arithmetic rather than pointers synthesized
 * from a null pointer (undefined behavior in C).
 */
static inline int ionic_v1_send_wqe_max_data(u8 stride_log2, bool expdb)
{
	ssize_t bytes = (ssize_t)(1ull << stride_log2);

	if (expdb)
		bytes -= IONIC_EXP_DBELL_SZ;

	return bytes - (ssize_t)offsetof(struct ionic_v1_wqe,
					 common.pld.data);
}
/*
 * Minimum wqe size (stride) needed for a receive wqe carrying min_sge
 * sges.  The spec sgl format reserves the first IONIC_V1_SPEC_FIRST_SGE
 * slots; expdb reserves one trailing sge slot.
 */
static inline size_t ionic_v1_recv_wqe_min_size(int min_sge, int spec,
						bool expdb)
{
	size_t sz_sgl;

	if (spec > IONIC_V1_SPEC_FIRST_SGE)
		min_sge += IONIC_V1_SPEC_FIRST_SGE;

	if (expdb)
		min_sge += 1;

	sz_sgl = offsetof(struct ionic_v1_wqe, recv.pld.sgl[min_sge]);

	return sz_sgl > sizeof(struct ionic_v1_wqe) ?
		sz_sgl : sizeof(struct ionic_v1_wqe);
}
/*
 * Max number of sges that fit in the sgl of a receive wqe with the
 * given stride.  Expdb consumes one sge-sized slot at the end; with
 * the spec format, the first IONIC_V1_SPEC_FIRST_SGE slots are
 * reserved and the result is clamped to spec.
 *
 * Computed with offsetof arithmetic rather than pointers synthesized
 * from a null pointer (undefined behavior in C).
 */
static inline int ionic_v1_recv_wqe_max_sge(u8 stride_log2, int spec,
					    bool expdb)
{
	ssize_t bytes = (ssize_t)(1ull << stride_log2);
	int first_sge = 0;
	int num_sge;

	if (expdb)
		bytes -= sizeof(struct ionic_sge);

	if (spec > IONIC_V1_SPEC_FIRST_SGE)
		first_sge = IONIC_V1_SPEC_FIRST_SGE;

	num_sge = (bytes - (ssize_t)offsetof(struct ionic_v1_wqe,
					     recv.pld.sgl[first_sge])) /
		(ssize_t)sizeof(struct ionic_sge);

	if (spec && num_sge > spec)
		num_sge = spec;

	return num_sge;
}
/*
 * Decide how many sge slots to reserve for the spec sgl format: zero
 * if spec is disabled or cannot hold min_sge, otherwise the smaller
 * applicable reservation.
 */
static inline int ionic_v1_use_spec_sge(int min_sge, int spec)
{
	if (!spec || min_sge > spec)
		return 0;

	return min_sge <= IONIC_V1_SPEC_FIRST_SGE ?
		IONIC_V1_SPEC_FIRST_SGE : spec;
}
/* wire format of the CREATE_AH admin command payload */
struct ionic_admin_create_ah {
	__le64 dma_addr;
	__le32 length;
	__le32 pd_id;
	__le32 id_ver;
	__le16 dbid_flags;
	__u8 csum_profile;	/* enum ionic_tfp_csum_profiles */
	__u8 crypto;
} __packed;

/* wire sizes are pinned by static_assert against the fw abi */
#define IONIC_ADMIN_CREATE_AH_IN_V1_LEN 24
static_assert(sizeof(struct ionic_admin_create_ah) ==
	      IONIC_ADMIN_CREATE_AH_IN_V1_LEN);

/* wire format of the DESTROY_AH admin command payload */
struct ionic_admin_destroy_ah {
	__le32 ah_id;
} __packed;

#define IONIC_ADMIN_DESTROY_AH_IN_V1_LEN 4
static_assert(sizeof(struct ionic_admin_destroy_ah) ==
	      IONIC_ADMIN_DESTROY_AH_IN_V1_LEN);

/* wire format of the QUERY_AH admin command payload */
struct ionic_admin_query_ah {
	__le64 dma_addr;	/* buffer for the response side data */
} __packed;

#define IONIC_ADMIN_QUERY_AH_IN_V1_LEN 8
static_assert(sizeof(struct ionic_admin_query_ah) ==
	      IONIC_ADMIN_QUERY_AH_IN_V1_LEN);

/* wire format of the CREATE_MR admin command payload */
struct ionic_admin_create_mr {
	__le64 va;
	__le64 length;
	__le32 pd_id;
	__le32 id_ver;
	__le32 tbl_index;
	__le32 map_count;
	__le64 dma_addr;
	__le16 dbid_flags;
	__u8 pt_type;
	__u8 dir_size_log2;
	__u8 page_size_log2;
} __packed;

#define IONIC_ADMIN_CREATE_MR_IN_V1_LEN 45
static_assert(sizeof(struct ionic_admin_create_mr) ==
	      IONIC_ADMIN_CREATE_MR_IN_V1_LEN);

/* wire format of the DESTROY_MR admin command payload */
struct ionic_admin_destroy_mr {
	__le32 mr_id;
} __packed;

#define IONIC_ADMIN_DESTROY_MR_IN_V1_LEN 4
static_assert(sizeof(struct ionic_admin_destroy_mr) ==
	      IONIC_ADMIN_DESTROY_MR_IN_V1_LEN);

/* wire format of the CREATE_CQ admin command payload */
struct ionic_admin_create_cq {
	__le32 eq_id;
	__u8 depth_log2;
	__u8 stride_log2;
	__u8 dir_size_log2_rsvd;
	__u8 page_size_log2;
	__le32 cq_flags;
	__le32 id_ver;
	__le32 tbl_index;
	__le32 map_count;
	__le64 dma_addr;
	__le16 dbid_flags;
} __packed;

#define IONIC_ADMIN_CREATE_CQ_IN_V1_LEN 34
static_assert(sizeof(struct ionic_admin_create_cq) ==
	      IONIC_ADMIN_CREATE_CQ_IN_V1_LEN);

/* wire format of the DESTROY_CQ admin command payload */
struct ionic_admin_destroy_cq {
	__le32 cq_id;
} __packed;

#define IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN 4
static_assert(sizeof(struct ionic_admin_destroy_cq) ==
	      IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN);
/* wire format of the CREATE_QP admin command payload */
struct ionic_admin_create_qp {
	__le32 pd_id;
	__be32 priv_flags;
	__le32 sq_cq_id;
	__u8 sq_depth_log2;
	__u8 sq_stride_log2;
	__u8 sq_dir_size_log2_rsvd;
	__u8 sq_page_size_log2;
	__le32 sq_tbl_index_xrcd_id;
	__le32 sq_map_count;
	__le64 sq_dma_addr;
	__le32 rq_cq_id;
	__u8 rq_depth_log2;
	__u8 rq_stride_log2;
	__u8 rq_dir_size_log2_rsvd;
	__u8 rq_page_size_log2;
	__le32 rq_tbl_index_srq_id;
	__le32 rq_map_count;
	__le64 rq_dma_addr;
	__le32 id_ver;
	__le16 dbid_flags;
	__u8 type_state;	/* see to_ionic_qp_type / qp state */
	__u8 rsvd;
} __packed;

/* wire sizes are pinned by static_assert against the fw abi */
#define IONIC_ADMIN_CREATE_QP_IN_V1_LEN 64
static_assert(sizeof(struct ionic_admin_create_qp) ==
	      IONIC_ADMIN_CREATE_QP_IN_V1_LEN);

/* wire format of the DESTROY_QP admin command payload */
struct ionic_admin_destroy_qp {
	__le32 qp_id;
} __packed;

#define IONIC_ADMIN_DESTROY_QP_IN_V1_LEN 4
static_assert(sizeof(struct ionic_admin_destroy_qp) ==
	      IONIC_ADMIN_DESTROY_QP_IN_V1_LEN);

/* wire format of the MODIFY_QP admin command payload */
struct ionic_admin_mod_qp {
	__be32 attr_mask;
	__u8 dcqcn_profile;
	__u8 tfp_csum_profile;
	__be16 access_flags;	/* IONIC_QPF_* */
	__le32 rq_psn;
	__le32 sq_psn;
	__le32 qkey_dest_qpn;
	__le32 rate_limit_kbps;
	__u8 pmtu;
	__u8 retry;
	__u8 rnr_timer;
	__u8 retry_timeout;
	__u8 rsq_depth;
	__u8 rrq_depth;
	__le16 pkey_id;
	__le32 ah_id_len;
	__u8 en_pcp;
	__u8 ip_dscp;
	__u8 rsvd2;
	__u8 type_state;	/* see to_ionic_qp_modify_state */
	union {
		struct {
			__le16 rsvd1;
		};
		__le32 rrq_index;
	};
	__le32 rsq_index;
	__le64 dma_addr;
	__le32 id_ver;
} __packed;

#define IONIC_ADMIN_MODIFY_QP_IN_V1_LEN 60
static_assert(sizeof(struct ionic_admin_mod_qp) ==
	      IONIC_ADMIN_MODIFY_QP_IN_V1_LEN);

/* wire format of the QUERY_QP admin command payload; the response is
 * written to the caller-provided dma buffers
 */
struct ionic_admin_query_qp {
	__le64 hdr_dma_addr;
	__le64 sq_dma_addr;
	__le64 rq_dma_addr;
	__le32 ah_id;
	__le32 id_ver;
	__le16 dbid_flags;
} __packed;

#define IONIC_ADMIN_QUERY_QP_IN_V1_LEN 34
static_assert(sizeof(struct ionic_admin_query_qp) ==
	      IONIC_ADMIN_QUERY_QP_IN_V1_LEN);
#define ADMIN_WQE_STRIDE 64
#define ADMIN_WQE_HDR_LEN 4
@ -88,9 +732,66 @@ struct ionic_v1_admin_wqe {
__le16 len;
union {
struct ionic_admin_create_ah create_ah;
struct ionic_admin_destroy_ah destroy_ah;
struct ionic_admin_query_ah query_ah;
struct ionic_admin_create_mr create_mr;
struct ionic_admin_destroy_mr destroy_mr;
struct ionic_admin_create_cq create_cq;
struct ionic_admin_destroy_cq destroy_cq;
struct ionic_admin_create_qp create_qp;
struct ionic_admin_destroy_qp destroy_qp;
struct ionic_admin_mod_qp mod_qp;
struct ionic_admin_query_qp query_qp;
} cmd;
};
/* side data for query qp, dma'd back by the device into the buffers
 * referenced by struct ionic_admin_query_qp
 */
struct ionic_v1_admin_query_qp_sq {
	__u8 rnr_timer;
	__u8 retry_timeout;
	__be16 access_perms_flags;
	__be16 rsvd;
	__be16 pkey_id;
	__be32 qkey_dest_qpn;
	__be32 rate_limit_kbps;
	__be32 rq_psn;
};

struct ionic_v1_admin_query_qp_rq {
	__u8 state_pmtu;
	__u8 retry_rnrtry;
	__u8 rrq_depth;
	__u8 rsq_depth;
	__be32 sq_psn;
	__be16 access_perms_flags;
	__be16 rsvd;
};
/* admin queue v1 opcodes; values are part of the fw abi, so retired
 * slots remain reserved (see RSVD_7)
 */
enum ionic_v1_admin_op {
	IONIC_V1_ADMIN_NOOP,
	IONIC_V1_ADMIN_CREATE_CQ,
	IONIC_V1_ADMIN_CREATE_QP,
	IONIC_V1_ADMIN_CREATE_MR,
	IONIC_V1_ADMIN_STATS_HDRS,
	IONIC_V1_ADMIN_STATS_VALS,
	IONIC_V1_ADMIN_DESTROY_MR,
	IONIC_V1_ADMIN_RSVD_7,		/* RESIZE_CQ */
	IONIC_V1_ADMIN_DESTROY_CQ,
	IONIC_V1_ADMIN_MODIFY_QP,
	IONIC_V1_ADMIN_QUERY_QP,
	IONIC_V1_ADMIN_DESTROY_QP,
	IONIC_V1_ADMIN_DEBUG,
	IONIC_V1_ADMIN_CREATE_AH,
	IONIC_V1_ADMIN_QUERY_AH,
	IONIC_V1_ADMIN_MODIFY_DCQCN,
	IONIC_V1_ADMIN_DESTROY_AH,
	IONIC_V1_ADMIN_QP_STATS_HDRS,
	IONIC_V1_ADMIN_QP_STATS_VALS,

	IONIC_V1_ADMIN_OPCODES_MAX,
};
/* admin queue v1 cqe status */
enum ionic_v1_admin_status {
IONIC_V1_ASTS_OK,
@ -136,6 +837,22 @@ enum ionic_v1_eqe_evt_bits {
IONIC_V1_EQE_QP_ERR_ACCESS = 10,
};
/* tfp checksum profile selectors; each name encodes the header stack
 * the profile covers (outer to inner)
 */
enum ionic_tfp_csum_profiles {
	IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP			= 0,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP			= 1,
	IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP			= 2,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP			= 3,
	IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP	= 4,
	IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP	= 5,
	IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 6,
	IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 7,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_IPV4_UDP	= 8,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_ESP_UDP		= 9,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_UDP		= 10,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_ESP_UDP		= 11,
	IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_CSUM		= 12,
};
static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
{
return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);

View file

@ -15,6 +15,44 @@ MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("NET_IONIC");
/*
 * Control path verb ops registered on the ib_device.  Note that
 * create_user_ah shares the ionic_create_ah handler with create_ah.
 */
static const struct ib_device_ops ionic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_IONIC,
	.uverbs_abi_ver = IONIC_ABI_VERSION,

	.alloc_ucontext = ionic_alloc_ucontext,
	.dealloc_ucontext = ionic_dealloc_ucontext,
	.mmap = ionic_mmap,
	.mmap_free = ionic_mmap_free,
	.alloc_pd = ionic_alloc_pd,
	.dealloc_pd = ionic_dealloc_pd,
	.create_ah = ionic_create_ah,
	.query_ah = ionic_query_ah,
	.destroy_ah = ionic_destroy_ah,
	.create_user_ah = ionic_create_ah,
	.get_dma_mr = ionic_get_dma_mr,
	.reg_user_mr = ionic_reg_user_mr,
	.reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
	.dereg_mr = ionic_dereg_mr,
	.alloc_mr = ionic_alloc_mr,
	.map_mr_sg = ionic_map_mr_sg,
	.alloc_mw = ionic_alloc_mw,
	.dealloc_mw = ionic_dealloc_mw,
	.create_cq = ionic_create_cq,
	.destroy_cq = ionic_destroy_cq,
	.create_qp = ionic_create_qp,
	.modify_qp = ionic_modify_qp,
	.query_qp = ionic_query_qp,
	.destroy_qp = ionic_destroy_qp,

	/* core allocates driver objects with these embedded ib objects */
	INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
	INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
};
static void ionic_init_resids(struct ionic_ibdev *dev)
{
ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
@ -48,6 +86,8 @@ static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
ib_unregister_device(&dev->ibdev);
ionic_destroy_rdma_admin(dev);
ionic_destroy_resids(dev);
WARN_ON(!xa_empty(&dev->qp_tbl));
xa_destroy(&dev->qp_tbl);
WARN_ON(!xa_empty(&dev->cq_tbl));
xa_destroy(&dev->cq_tbl);
ib_dealloc_device(&dev->ibdev);
@ -66,6 +106,7 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);
xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);
ionic_init_resids(dev);
@ -98,6 +139,8 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
if (rc)
goto err_admin;
ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);
rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
if (rc)
goto err_register;
@ -110,6 +153,7 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
ionic_destroy_rdma_admin(dev);
err_reset:
ionic_destroy_resids(dev);
xa_destroy(&dev->qp_tbl);
xa_destroy(&dev->cq_tbl);
ib_dealloc_device(&dev->ibdev);
@ -161,7 +205,7 @@ static int __init ionic_mod_init(void)
{
int rc;
ionic_evt_workq = create_workqueue(DRIVER_NAME "-evt");
ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
if (!ionic_evt_workq)
return -ENOMEM;

View file

@ -6,7 +6,10 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/ionic-abi.h>
#include <ionic_api.h>
#include <ionic_regs.h>
@ -24,9 +27,26 @@
#define IONIC_EQ_ISR_BUDGET 10
#define IONIC_EQ_WORK_BUDGET 1000
#define IONIC_MAX_PD 1024
#define IONIC_SPEC_HIGH 8
#define IONIC_SQCMB_ORDER 5
#define IONIC_RQCMB_ORDER 0
#define IONIC_META_LAST ((void *)1ul)
#define IONIC_META_POSTED ((void *)2ul)
#define IONIC_CQ_GRACE 100
#define IONIC_ROCE_UDP_SPORT 28272
#define IONIC_DMA_LKEY 0
#define IONIC_DMA_RKEY IONIC_DMA_LKEY
#define IONIC_CMB_SUPPORTED \
(IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \
IONIC_CMB_WC | IONIC_CMB_UC)
/* resource is not reserved on the device, indicated in tbl_order */
#define IONIC_RES_INVALID -1
struct ionic_aq;
struct ionic_cq;
struct ionic_eq;
@ -44,14 +64,6 @@ enum ionic_admin_flags {
IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
};
struct ionic_qdesc {
__aligned_u64 addr;
__u32 size;
__u16 mask;
__u8 depth_log2;
__u8 stride_log2;
};
enum ionic_mmap_flag {
IONIC_MMAP_WC = BIT(0),
};
@ -160,6 +172,13 @@ struct ionic_tbl_buf {
u8 page_size_log2;
};
/* driver protection domain, embedding the ib core pd */
struct ionic_pd {
	struct ib_pd ibpd;

	u32 pdid;	/* device pd id */
	u32 flags;
};
struct ionic_cq {
struct ionic_vcq *vcq;
@ -193,11 +212,188 @@ struct ionic_vcq {
u8 poll_idx;
};
/* per-wqe bookkeeping for the send queue */
struct ionic_sq_meta {
	u64 wrid;	/* caller's work request id */
	u32 len;
	u16 seq;
	u8 ibop;	/* ib wc opcode for completion */
	u8 ibsts;	/* ib wc status for completion */
	u8 remote:1;
	u8 signal:1;
	u8 local_comp:1;
};

/* per-wqe bookkeeping for the receive queue, kept on a free list */
struct ionic_rq_meta {
	struct ionic_rq_meta *next;
	u64 wrid;	/* caller's work request id */
};
/* driver queue pair, embedding the ib core qp */
struct ionic_qp {
	struct ib_qp ibqp;
	enum ib_qp_state state;		/* cached qp state */

	u32 qpid;
	u32 ahid;
	u32 sq_cqid;
	u32 rq_cqid;
	u8 udma_idx;
	u8 has_ah:1;
	u8 has_sq:1;
	u8 has_rq:1;
	u8 sig_all:1;			/* all wqes signaled for completion */

	struct list_head qp_list_counter;

	struct list_head cq_poll_sq;
	struct list_head cq_flush_sq;
	struct list_head cq_flush_rq;
	struct list_head ibkill_flush_ent;

	spinlock_t sq_lock; /* for posting and polling */
	struct ionic_queue sq;
	struct ionic_sq_meta *sq_meta;
	u16 *sq_msn_idx;
	int sq_spec;
	u16 sq_old_prod;
	u16 sq_msn_prod;
	u16 sq_msn_cons;
	u8 sq_cmb;
	bool sq_flush;
	bool sq_flush_rcvd;

	spinlock_t rq_lock; /* for posting and polling */
	struct ionic_queue rq;
	struct ionic_rq_meta *rq_meta;
	struct ionic_rq_meta *rq_meta_head;	/* free list of rq meta */
	int rq_spec;
	u16 rq_old_prod;
	u8 rq_cmb;
	bool rq_flush;

	/* destroy waits on qp_rel_comp, signaled by the last kref_put */
	struct kref qp_kref;
	struct completion qp_rel_comp;

	/* infrequently accessed, keep at end */
	int sgid_index;
	int sq_cmb_order;
	u32 sq_cmb_pgid;
	phys_addr_t sq_cmb_addr;
	struct rdma_user_mmap_entry *mmap_sq_cmb;

	struct ib_umem *sq_umem;

	int rq_cmb_order;
	u32 rq_cmb_pgid;
	phys_addr_t rq_cmb_addr;
	struct rdma_user_mmap_entry *mmap_rq_cmb;

	struct ib_umem *rq_umem;

	int dcqcn_profile;

	struct ib_ud_header *hdr;
};
/* driver address handle, embedding the ib core ah */
struct ionic_ah {
	struct ib_ah ibah;
	u32 ahid;		/* device ah id */
	int sgid_index;
	struct ib_ud_header hdr;
};

/* driver mr/mw; the same object backs either an ib_mr or an ib_mw */
struct ionic_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};

	u32 mrid;		/* see ionic_mrid() */
	int flags;		/* IONIC_MRF_* */

	struct ib_umem *umem;
	struct ionic_tbl_buf buf;
	bool created;		/* created on the device */
};
/* ib core object -> driver object conversions */

static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ionic_ibdev, ibdev);
}

static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx)
{
	return container_of(ibctx, struct ionic_ctx, ibctx);
}

/* Context of a uobject, or NULL for kernel (no uobject/context). */
static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj)
{
	if (!uobj)
		return NULL;

	if (!uobj->context)
		return NULL;

	return to_ionic_ctx(uobj->context);
}

static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ionic_pd, ibpd);
}

static inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ionic_mr, ibmr);
}

static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct ionic_mr, ibmw);
}

static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ionic_vcq, ibcq);
}

/* One of the per-udma cqs backing the verbs cq. */
static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq,
					       uint8_t udma_idx)
{
	return &to_ionic_vcq(ibcq)->cq[udma_idx];
}

static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ionic_qp, ibqp);
}

static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ionic_ah, ibah);
}
/* Doorbell id of a user context, or the lif's own dbid for kernel. */
static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev,
				 struct ionic_ctx *ctx)
{
	if (!ctx)
		return dev->lif_cfg.dbid;

	return ctx->dbid;
}

/* Doorbell id for the context owning a uobject (kernel if none). */
static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
				 struct ib_uobject *uobj)
{
	return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
}

/* kref release: wake the thread waiting in qp destroy */
static inline void ionic_qp_complete(struct kref *kref)
{
	struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);

	complete(&qp->qp_rel_comp);
}
static inline void ionic_cq_complete(struct kref *kref)
{
struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);
@ -227,8 +423,47 @@ int ionic_create_cq_common(struct ionic_vcq *vcq,
__u32 *resp_cqid,
int udma_idx);
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp);
void ionic_notify_flush_cq(struct ionic_cq *cq);
int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata);
void ionic_dealloc_ucontext(struct ib_ucontext *ibctx);
int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma);
void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int ionic_destroy_ah(struct ib_ah *ibah, u32 flags);
struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access);
struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 addr, int access, struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
u64 length, u64 addr, int fd, int access,
struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
u32 max_sg);
int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int ionic_dealloc_mw(struct ib_mw *ibmw);
int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
struct ib_udata *udata);
int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
struct ib_udata *udata);
int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
struct ib_qp_init_attr *init_attr);
int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
/* ionic_pgtbl.c */
__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
int ionic_pgtbl_init(struct ionic_ibdev *dev,
struct ionic_tbl_buf *buf,

View file

@ -7,6 +7,25 @@
#include "ionic_fw.h"
#include "ionic_ibdev.h"
/*
 * DMA address to give the device for a page table buffer.
 *
 * No pages: zero.  Multiple pages: the dma address of the page table
 * itself.  Single page: the page's dma address directly (from the
 * one-entry table if allocated, else tbl_dma), plus the offset of va
 * within the page, so no table lookup is needed by the device.
 */
__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
{
	u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
	u64 dma;

	if (!buf->tbl_pages)
		return cpu_to_le64(0);

	if (buf->tbl_pages > 1)
		return cpu_to_le64(buf->tbl_dma);

	if (buf->tbl_buf)
		dma = le64_to_cpu(buf->tbl_buf[0]);
	else
		dma = buf->tbl_dma;

	return cpu_to_le64(dma + (va & pg_mask));
}
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
{
if (unlikely(buf->tbl_pages == buf->tbl_limit))

View file

@ -0,0 +1,115 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc */
#ifndef IONIC_ABI_H
#define IONIC_ABI_H
#include <linux/types.h>
#define IONIC_ABI_VERSION 1
#define IONIC_EXPDB_64 1
#define IONIC_EXPDB_128 2
#define IONIC_EXPDB_256 4
#define IONIC_EXPDB_512 8
#define IONIC_EXPDB_SQ 1
#define IONIC_EXPDB_RQ 2
#define IONIC_CMB_ENABLE 1
#define IONIC_CMB_REQUIRE 2
#define IONIC_CMB_EXPDB 4
#define IONIC_CMB_WC 8
#define IONIC_CMB_UC 16
/* udata request for alloc_ucontext */
struct ionic_ctx_req {
	__u32 rsvd[2];
};

/* udata response for alloc_ucontext: queue types and limits userspace
 * needs to drive the device directly
 */
struct ionic_ctx_resp {
	__u32 rsvd;
	__u32 page_shift;

	__aligned_u64 dbell_offset;	/* mmap offset of the doorbell page */

	__u16 version;
	__u8 qp_opcodes;
	__u8 admin_opcodes;

	__u8 sq_qtype;
	__u8 rq_qtype;
	__u8 cq_qtype;
	__u8 admin_qtype;

	__u8 max_stride;
	__u8 max_spec;		/* max spec-format sges */
	__u8 udma_count;
	__u8 expdb_mask;
	__u8 expdb_qtypes;	/* IONIC_EXPDB_SQ / IONIC_EXPDB_RQ */
	__u8 rsvd2[3];
};

/* userspace description of one queue's ring buffer */
struct ionic_qdesc {
	__aligned_u64 addr;
	__u32 size;
	__u16 mask;
	__u8 depth_log2;
	__u8 stride_log2;
};
/* udata response for create_ah */
struct ionic_ah_resp {
	__u32 ahid;
	__u32 pad;
};

/* udata request for create_cq; one queue per udma */
struct ionic_cq_req {
	struct ionic_qdesc cq[2];
	__u8 udma_mask;
	__u8 rsvd[7];
};

/* udata response for create_cq */
struct ionic_cq_resp {
	__u32 cqid[2];
	__u8 udma_mask;
	__u8 rsvd[7];
};

/* udata request for create_qp */
struct ionic_qp_req {
	struct ionic_qdesc sq;
	struct ionic_qdesc rq;
	__u8 sq_spec;
	__u8 rq_spec;
	__u8 sq_cmb;		/* IONIC_CMB_* */
	__u8 rq_cmb;		/* IONIC_CMB_* */
	__u8 udma_mask;
	__u8 rsvd[3];
};

/* udata response for create_qp */
struct ionic_qp_resp {
	__u32 qpid;
	__u8 sq_cmb;		/* IONIC_CMB_* granted */
	__u8 rq_cmb;		/* IONIC_CMB_* granted */
	__u8 udma_idx;
	__u8 rsvd[1];
	__aligned_u64 sq_cmb_offset;	/* mmap offset if sq in cmb */
	__aligned_u64 rq_cmb_offset;	/* mmap offset if rq in cmb */
};
/* udata request for create_srq */
struct ionic_srq_req {
	struct ionic_qdesc rq;
	__u8 rq_spec;
	__u8 rq_cmb;		/* IONIC_CMB_* */
	__u8 udma_mask;
	__u8 rsvd[5];
};

/* udata response for create_srq */
struct ionic_srq_resp {
	__u32 qpid;
	__u8 rq_cmb;		/* IONIC_CMB_* granted */
	__u8 udma_idx;
	__u8 rsvd[2];
	__aligned_u64 rq_cmb_offset;	/* mmap offset if rq in cmb */
};
#endif /* IONIC_ABI_H */