RDMA/ionic: Register device ops for datapath

Implement the datapath verb APIs supported by the device: post_send, post_recv, poll_cq, and req_notify_cq.
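
For context, a minimal sketch of how a kernel ULP reaches these ops through the
RDMA core once a QP and CQ exist (the function name post_example and the
single-SGE send are illustrative only, not part of this patch):

#include <rdma/ib_verbs.h>

static int post_example(struct ib_qp *qp, struct ib_cq *cq, struct ib_sge *sge)
{
	struct ib_send_wr wr = {
		.wr_id = 1,
		.opcode = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list = sge,
		.num_sge = 1,
	};
	const struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int rc;

	rc = ib_post_send(qp, &wr, &bad_wr);	/* dispatches to ionic_post_send() */
	if (rc)
		return rc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* ionic_req_notify_cq() */
	while (ib_poll_cq(cq, 1, &wc) == 1)	/* ionic_poll_cq() */
		pr_info("wc status %d\n", wc.status);

	return 0;
}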

Co-developed-by: Andrew Boyer <andrew.boyer@amd.com>
Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
Co-developed-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Abhijit Gangurde <abhijit.gangurde@amd.com>
Link: https://patch.msgid.link/20250903061606.4139957-12-abhijit.gangurde@amd.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit b83c62055b (parent e8521822c7)
Author: Abhijit Gangurde <abhijit.gangurde@amd.com>
Date: 2025-09-03 11:46:03 +05:30
Committed by: Leon Romanovsky

5 changed files with 1534 additions and 0 deletions

File diff suppressed because it is too large

@@ -163,6 +163,61 @@ static inline int to_ionic_qp_flags(int access, bool sqd_notify,
	return flags;
}

/* cqe non-admin status indicated in status_length field when err bit is set */
enum ionic_status {
	IONIC_STS_OK,
	IONIC_STS_LOCAL_LEN_ERR,
	IONIC_STS_LOCAL_QP_OPER_ERR,
	IONIC_STS_LOCAL_PROT_ERR,
	IONIC_STS_WQE_FLUSHED_ERR,
	IONIC_STS_MEM_MGMT_OPER_ERR,
	IONIC_STS_BAD_RESP_ERR,
	IONIC_STS_LOCAL_ACC_ERR,
	IONIC_STS_REMOTE_INV_REQ_ERR,
	IONIC_STS_REMOTE_ACC_ERR,
	IONIC_STS_REMOTE_OPER_ERR,
	IONIC_STS_RETRY_EXCEEDED,
	IONIC_STS_RNR_RETRY_EXCEEDED,
	IONIC_STS_XRC_VIO_ERR,
	IONIC_STS_LOCAL_SGL_INV_ERR,
};

static inline int ionic_to_ib_status(int sts)
{
	switch (sts) {
	case IONIC_STS_OK:
		return IB_WC_SUCCESS;
	case IONIC_STS_LOCAL_LEN_ERR:
		return IB_WC_LOC_LEN_ERR;
	case IONIC_STS_LOCAL_QP_OPER_ERR:
	case IONIC_STS_LOCAL_SGL_INV_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case IONIC_STS_LOCAL_PROT_ERR:
		return IB_WC_LOC_PROT_ERR;
	case IONIC_STS_WQE_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case IONIC_STS_MEM_MGMT_OPER_ERR:
		return IB_WC_MW_BIND_ERR;
	case IONIC_STS_BAD_RESP_ERR:
		return IB_WC_BAD_RESP_ERR;
	case IONIC_STS_LOCAL_ACC_ERR:
		return IB_WC_LOC_ACCESS_ERR;
	case IONIC_STS_REMOTE_INV_REQ_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case IONIC_STS_REMOTE_ACC_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case IONIC_STS_REMOTE_OPER_ERR:
		return IB_WC_REM_OP_ERR;
	case IONIC_STS_RETRY_EXCEEDED:
		return IB_WC_RETRY_EXC_ERR;
	case IONIC_STS_RNR_RETRY_EXCEEDED:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case IONIC_STS_XRC_VIO_ERR:
	default:
		return IB_WC_GENERAL_ERR;
	}
}
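
/*
 * Not part of this patch: a sketch of how the mapping above is typically
 * consumed when polling.  Per the comment on enum ionic_status, the device
 * reports the status in the CQE status_length field when the error bit is
 * set; where that value comes from is assumed here for illustration.
 */
static inline void example_fill_wc_status(struct ib_wc *wc, bool err, u32 status)
{
	/* 'status' would be read from the CQE when its error bit is set */
	wc->status = err ? ionic_to_ib_status(status) : IB_WC_SUCCESS;
}
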
/* admin queue qp type */
enum ionic_qp_type {
	IONIC_QPT_RC,
@@ -294,6 +349,24 @@ struct ionic_v1_cqe {
	__be32 qid_type_flags;
};

/* bits for cqe recv */
enum ionic_v1_cqe_src_qpn_bits {
	IONIC_V1_CQE_RECV_QPN_MASK = 0xffffff,
	IONIC_V1_CQE_RECV_OP_SHIFT = 24,
	/* MASK could be 0x3, but need 0x1f for makeshift values:
	 * OP_TYPE_RDMA_OPER_WITH_IMM, OP_TYPE_SEND_RCVD
	 */
	IONIC_V1_CQE_RECV_OP_MASK = 0x1f,
	IONIC_V1_CQE_RECV_OP_SEND = 0,
	IONIC_V1_CQE_RECV_OP_SEND_INV = 1,
	IONIC_V1_CQE_RECV_OP_SEND_IMM = 2,
	IONIC_V1_CQE_RECV_OP_RDMA_IMM = 3,
	IONIC_V1_CQE_RECV_IS_IPV4 = BIT(7 + IONIC_V1_CQE_RECV_OP_SHIFT),
	IONIC_V1_CQE_RECV_IS_VLAN = BIT(6 + IONIC_V1_CQE_RECV_OP_SHIFT),
};
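
/*
 * Not part of this patch: example of how the masks above decompose the
 * big-endian recv.src_qpn_op word of a receive CQE into the source QPN
 * and the receive opcode.
 */
static inline void example_parse_recv_cqe(struct ionic_v1_cqe *cqe,
					  u32 *src_qpn, u32 *op)
{
	u32 v = be32_to_cpu(cqe->recv.src_qpn_op);

	*src_qpn = v & IONIC_V1_CQE_RECV_QPN_MASK;
	*op = (v >> IONIC_V1_CQE_RECV_OP_SHIFT) & IONIC_V1_CQE_RECV_OP_MASK;
}
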
/* bits for cqe qid_type_flags */
enum ionic_v1_cqe_qtf_bits {
	IONIC_V1_CQE_COLOR = BIT(0),

@@ -318,6 +391,16 @@ static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
	return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
}

static inline bool ionic_v1_cqe_recv_is_ipv4(struct ionic_v1_cqe *cqe)
{
	return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_IPV4);
}

static inline bool ionic_v1_cqe_recv_is_vlan(struct ionic_v1_cqe *cqe)
{
	return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_VLAN);
}
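
/*
 * Not part of this patch: sketch of how the two helpers above could feed
 * a receive work completion, assuming a RoCEv2-style CQE where the packet
 * is either IPv4 or IPv6.  IB_WC_WITH_VLAN, IB_WC_WITH_NETWORK_HDR_TYPE and
 * RDMA_NETWORK_IPV4/IPV6 are standard definitions from ib_verbs.h.
 */
static inline void example_set_recv_net_flags(struct ionic_v1_cqe *cqe,
					      struct ib_wc *wc)
{
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	wc->network_hdr_type = ionic_v1_cqe_recv_is_ipv4(cqe) ?
			       RDMA_NETWORK_IPV4 : RDMA_NETWORK_IPV6;

	if (ionic_v1_cqe_recv_is_vlan(cqe))
		wc->wc_flags |= IB_WC_WITH_VLAN;
}
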
static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
{
	cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
@@ -444,6 +527,28 @@ enum ionic_v1_op {
	IONIC_V1_SPEC_FIRST_SGE = 2,
};

/* queue pair v2 send opcodes */
enum ionic_v2_op {
	IONIC_V2_OPSL_OUT = 0x20,
	IONIC_V2_OPSL_IMM = 0x40,
	IONIC_V2_OPSL_INV = 0x80,
	IONIC_V2_OP_SEND = 0x0 | IONIC_V2_OPSL_OUT,
	IONIC_V2_OP_SEND_IMM = IONIC_V2_OP_SEND | IONIC_V2_OPSL_IMM,
	IONIC_V2_OP_SEND_INV = IONIC_V2_OP_SEND | IONIC_V2_OPSL_INV,
	IONIC_V2_OP_RDMA_WRITE = 0x1 | IONIC_V2_OPSL_OUT,
	IONIC_V2_OP_RDMA_WRITE_IMM = IONIC_V2_OP_RDMA_WRITE | IONIC_V2_OPSL_IMM,
	IONIC_V2_OP_RDMA_READ = 0x2,
	IONIC_V2_OP_ATOMIC_CS = 0x4,
	IONIC_V2_OP_ATOMIC_FA = 0x5,
	IONIC_V2_OP_REG_MR = 0x6,
	IONIC_V2_OP_LOCAL_INV = 0x7,
	IONIC_V2_OP_BIND_MW = 0x8,
};
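
/*
 * Not part of this patch: the v2 send opcodes compose from a low operation
 * nibble plus the OPSL_* qualifier bits, e.g.
 *   IONIC_V2_OP_SEND_IMM       = 0x0 | 0x20 | 0x40 = 0x60
 *   IONIC_V2_OP_RDMA_WRITE_IMM = 0x1 | 0x20 | 0x40 = 0x61
 * A hypothetical translation from the core ib_wr_opcode could look like:
 */
static inline int example_to_v2_op(enum ib_wr_opcode op)
{
	switch (op) {
	case IB_WR_SEND:
		return IONIC_V2_OP_SEND;
	case IB_WR_SEND_WITH_IMM:
		return IONIC_V2_OP_SEND_IMM;
	case IB_WR_SEND_WITH_INV:
		return IONIC_V2_OP_SEND_INV;
	case IB_WR_RDMA_WRITE:
		return IONIC_V2_OP_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IONIC_V2_OP_RDMA_WRITE_IMM;
	case IB_WR_RDMA_READ:
		return IONIC_V2_OP_RDMA_READ;
	default:
		return -EINVAL;
	}
}
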
static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data,
						int spec, bool expdb)
{

@@ -45,6 +45,11 @@ static const struct ib_device_ops ionic_dev_ops = {
	.query_qp = ionic_query_qp,
	.destroy_qp = ionic_destroy_qp,
	.post_send = ionic_post_send,
	.post_recv = ionic_post_recv,
	.poll_cq = ionic_poll_cq,
	.req_notify_cq = ionic_req_notify_cq,

	INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
	INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
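
/*
 * Not part of this patch: an ops table like the one above is typically
 * plugged into the RDMA core with ib_set_device_ops() before the device
 * is registered, roughly along these lines:
 */
static void example_register_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &ionic_dev_ops);
}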

@@ -387,6 +387,11 @@ static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
	return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
}

static inline bool ionic_ibop_is_local(enum ib_wr_opcode op)
{
	return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR;
}
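
/*
 * Not part of this patch: illustrates why the helper above is useful in a
 * send-posting path.  REG_MR and LOCAL_INV work requests are purely local,
 * so a poster can skip remote address/rkey handling for them; a
 * hypothetical check:
 */
static inline bool example_wr_has_remote(const struct ib_send_wr *wr)
{
	return !ionic_ibop_is_local(wr->opcode);
}
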
static inline void ionic_qp_complete(struct kref *kref)
{
	struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);

@@ -462,8 +467,17 @@ int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		   struct ib_qp_init_attr *init_attr);
int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);

/* ionic_datapath.c */
int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		    const struct ib_send_wr **bad);
int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		    const struct ib_recv_wr **bad);
int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

/* ionic_pgtbl.c */
__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
int ionic_pgtbl_init(struct ionic_ibdev *dev,
		     struct ionic_tbl_buf *buf,

@@ -26,6 +26,17 @@ __le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
	return cpu_to_le64(dma + (va & pg_mask));
}

__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va)
{
	if (buf->tbl_pages > 1) {
		u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;

		return cpu_to_be64(va & pg_mask);
	}

	return 0;
}
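
/*
 * Not part of this patch: worked example of the helper above, assuming a
 * 4K translation granule (page_size_log2 == 12, so pg_mask == 0xfff):
 *   - buf->tbl_pages == 1: the buffer is covered by a single page, no
 *     in-page offset is needed, and ionic_pgtbl_off() returns 0;
 *   - buf->tbl_pages > 1 and va == 0x12345678: the device walks the page
 *     table, and ionic_pgtbl_off() supplies the starting offset within the
 *     first page, cpu_to_be64(0x12345678 & 0xfff) == cpu_to_be64(0x678).
 */
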
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
{
	if (unlikely(buf->tbl_pages == buf->tbl_limit))