RDMA/ionic: Create device queues to support admin operations

Set up the RDMA admin queues using the device commands exposed over the
auxiliary device, and manage their ids with an IDA-backed allocator.

Co-developed-by: Andrew Boyer <andrew.boyer@amd.com>
Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
Co-developed-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Abhijit Gangurde <abhijit.gangurde@amd.com>
Link: https://patch.msgid.link/20250903061606.4139957-10-abhijit.gangurde@amd.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
9 changed files with 2300 additions and 0 deletions

File diff suppressed because it is too large.
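
For orientation, a condensed sketch of the bring-up and teardown ordering this patch adds. The function names are taken from the diffs below; error handling, the ib_device setup between the two steps, and the `dev`/`rc` declarations are elided:

/* probe path, as in ionic_create_ibdev() below */
rc = ionic_rdma_reset_devcmd(dev);   /* reset RDMA state over the aux devcmd channel */
rc = ionic_create_rdma_admin(dev);   /* event queues plus admin queues */
/* ... ib_register_device() ... */

/* remove path, as in ionic_destroy_ibdev() below */
ionic_kill_rdma_admin(dev, false);   /* stop submitting; outstanding commands complete locally */
ib_unregister_device(&dev->ibdev);
ionic_destroy_rdma_admin(dev);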


@@ -0,0 +1,181 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#include "ionic_ibdev.h"
static int ionic_validate_qdesc(struct ionic_qdesc *q)
{
if (!q->addr || !q->size || !q->mask ||
!q->depth_log2 || !q->stride_log2)
return -EINVAL;
if (q->addr & (PAGE_SIZE - 1))
return -EINVAL;
if (q->mask != BIT(q->depth_log2) - 1)
return -EINVAL;
if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
return -EINVAL;
return 0;
}
static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
{
/* One EQ per vector per udma; the first EQs are reserved for async
 * events. The rest of the vectors can be requested for completions.
 */
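/*
 * Worked example with hypothetical sizes: eq_count = 32 and udma_count = 2
 * give comp_vec_count = 32 / 2 - 1 = 15, so comp_vector 0 on udma 0 maps
 * to eqid 2; eqids 0 and 1 stay reserved for the async event queues.
 */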
u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
return (comp_vector % comp_vec_count + 1) * dev->lif_cfg.udma_count + udma_idx;
}
static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
{
unsigned int size, base, bound;
int rc;
size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
base = size * udma_idx;
bound = base + size;
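/*
 * Example with hypothetical sizes: cq_count = 256 and udma_count = 2
 * give size = 128, so udma 0 allocates bit ids in [0, 128) and udma 1
 * in [128, 256).
 */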
rc = ionic_resid_get_shared(&dev->inuse_cqid, base, bound);
if (rc >= 0) {
/* cq_base is zero or a multiple of two queue groups */
*cqid = dev->lif_cfg.cq_base +
ionic_bitid_to_qid(rc, dev->lif_cfg.udma_qgrp_shift,
dev->half_cqid_udma_shift);
rc = 0;
}
return rc;
}
static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
{
u32 bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
dev->lif_cfg.udma_qgrp_shift,
dev->half_cqid_udma_shift);
ionic_resid_put(&dev->inuse_cqid, bitid);
}
int ionic_create_cq_common(struct ionic_vcq *vcq,
struct ionic_tbl_buf *buf,
const struct ib_cq_init_attr *attr,
struct ionic_ctx *ctx,
struct ib_udata *udata,
struct ionic_qdesc *req_cq,
__u32 *resp_cqid,
int udma_idx)
{
struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
struct ionic_cq *cq = &vcq->cq[udma_idx];
void *entry;
int rc;
cq->vcq = vcq;
if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
rc = -EINVAL;
goto err_args;
}
rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
if (rc)
goto err_args;
cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);
spin_lock_init(&cq->lock);
INIT_LIST_HEAD(&cq->poll_sq);
INIT_LIST_HEAD(&cq->flush_sq);
INIT_LIST_HEAD(&cq->flush_rq);
if (udata) {
rc = ionic_validate_qdesc(req_cq);
if (rc)
goto err_qdesc;
cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto err_qdesc;
}
cq->q.ptr = NULL;
cq->q.size = req_cq->size;
cq->q.mask = req_cq->mask;
cq->q.depth_log2 = req_cq->depth_log2;
cq->q.stride_log2 = req_cq->stride_log2;
*resp_cqid = cq->cqid;
} else {
rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
attr->cqe + IONIC_CQ_GRACE,
sizeof(struct ionic_v1_cqe));
if (rc)
goto err_q_init;
ionic_queue_dbell_init(&cq->q, cq->cqid);
cq->color = true;
cq->credit = cq->q.mask;
}
rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
if (rc)
goto err_pgtbl_init;
init_completion(&cq->cq_rel_comp);
kref_init(&cq->cq_kref);
entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
if (entry) {
if (!xa_is_err(entry))
rc = -EINVAL;
else
rc = xa_err(entry);
goto err_xa;
}
return 0;
err_xa:
ionic_pgtbl_unbuf(dev, buf);
err_pgtbl_init:
if (!udata)
ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
err_q_init:
if (cq->umem)
ib_umem_release(cq->umem);
err_qdesc:
ionic_put_cqid(dev, cq->cqid);
err_args:
cq->vcq = NULL;
return rc;
}
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
{
if (!cq->vcq)
return;
xa_erase_irq(&dev->cq_tbl, cq->cqid);
kref_put(&cq->cq_kref, ionic_cq_complete);
wait_for_completion(&cq->cq_rel_comp);
if (cq->umem)
ib_umem_release(cq->umem);
else
ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
ionic_put_cqid(dev, cq->cqid);
cq->vcq = NULL;
}


@@ -0,0 +1,164 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#ifndef _IONIC_FW_H_
#define _IONIC_FW_H_
#include <linux/kernel.h>
/* completion queue v1 cqe */
struct ionic_v1_cqe {
union {
struct {
__be16 cmd_idx;
__u8 cmd_op;
__u8 rsvd[17];
__le16 old_sq_cindex;
__le16 old_rq_cq_cindex;
} admin;
struct {
__u64 wqe_id;
__be32 src_qpn_op;
__u8 src_mac[6];
__be16 vlan_tag;
__be32 imm_data_rkey;
} recv;
struct {
__u8 rsvd[4];
__be32 msg_msn;
__u8 rsvd2[8];
__u64 npg_wqe_id;
} send;
};
__be32 status_length;
__be32 qid_type_flags;
};
/* bits for cqe qid_type_flags */
enum ionic_v1_cqe_qtf_bits {
IONIC_V1_CQE_COLOR = BIT(0),
IONIC_V1_CQE_ERROR = BIT(1),
IONIC_V1_CQE_TYPE_SHIFT = 5,
IONIC_V1_CQE_TYPE_MASK = 0x7,
IONIC_V1_CQE_QID_SHIFT = 8,
IONIC_V1_CQE_TYPE_ADMIN = 0,
IONIC_V1_CQE_TYPE_RECV = 1,
IONIC_V1_CQE_TYPE_SEND_MSN = 2,
IONIC_V1_CQE_TYPE_SEND_NPG = 3,
};
static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe)
{
return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR);
}
static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
{
return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
}
static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
{
cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
}
static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe)
{
return be32_to_cpu(cqe->qid_type_flags);
}
static inline u8 ionic_v1_cqe_qtf_type(u32 qtf)
{
return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK;
}
static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
{
return qtf >> IONIC_V1_CQE_QID_SHIFT;
}
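/*
 * Example decode with hypothetical values: qid_type_flags =
 * cpu_to_be32((7 << IONIC_V1_CQE_QID_SHIFT) |
 *             (IONIC_V1_CQE_TYPE_RECV << IONIC_V1_CQE_TYPE_SHIFT) |
 *             IONIC_V1_CQE_COLOR)
 * gives qtf = 0x721, type IONIC_V1_CQE_TYPE_RECV and qid 7.
 */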
#define ADMIN_WQE_STRIDE 64
#define ADMIN_WQE_HDR_LEN 4
/* admin queue v1 wqe */
struct ionic_v1_admin_wqe {
__u8 op;
__u8 rsvd;
__le16 len;
union {
} cmd;
};
/* admin queue v1 cqe status */
enum ionic_v1_admin_status {
IONIC_V1_ASTS_OK,
IONIC_V1_ASTS_BAD_CMD,
IONIC_V1_ASTS_BAD_INDEX,
IONIC_V1_ASTS_BAD_STATE,
IONIC_V1_ASTS_BAD_TYPE,
IONIC_V1_ASTS_BAD_ATTR,
IONIC_V1_ASTS_MSG_TOO_BIG,
};
/* event queue v1 eqe */
struct ionic_v1_eqe {
__be32 evt;
};
/* bits for eqe evt */
enum ionic_v1_eqe_evt_bits {
IONIC_V1_EQE_COLOR = BIT(0),
IONIC_V1_EQE_TYPE_SHIFT = 1,
IONIC_V1_EQE_TYPE_MASK = 0x7,
IONIC_V1_EQE_CODE_SHIFT = 4,
IONIC_V1_EQE_CODE_MASK = 0xf,
IONIC_V1_EQE_QID_SHIFT = 8,
/* cq events */
IONIC_V1_EQE_TYPE_CQ = 0,
/* cq normal events */
IONIC_V1_EQE_CQ_NOTIFY = 0,
/* cq error events */
IONIC_V1_EQE_CQ_ERR = 8,
/* qp and srq events */
IONIC_V1_EQE_TYPE_QP = 1,
/* qp normal events */
IONIC_V1_EQE_SRQ_LEVEL = 0,
IONIC_V1_EQE_SQ_DRAIN = 1,
IONIC_V1_EQE_QP_COMM_EST = 2,
IONIC_V1_EQE_QP_LAST_WQE = 3,
/* qp error events */
IONIC_V1_EQE_QP_ERR = 8,
IONIC_V1_EQE_QP_ERR_REQUEST = 9,
IONIC_V1_EQE_QP_ERR_ACCESS = 10,
};
static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
{
return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);
}
static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe)
{
return be32_to_cpu(eqe->evt);
}
static inline u8 ionic_v1_eqe_evt_type(u32 evt)
{
return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK;
}
static inline u8 ionic_v1_eqe_evt_code(u32 evt)
{
return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK;
}
static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
{
return evt >> IONIC_V1_EQE_QID_SHIFT;
}
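/*
 * Example decode with hypothetical values: evt =
 * cpu_to_be32((5 << IONIC_V1_EQE_QID_SHIFT) |
 *             (IONIC_V1_EQE_CQ_NOTIFY << IONIC_V1_EQE_CODE_SHIFT) |
 *             (IONIC_V1_EQE_TYPE_CQ << IONIC_V1_EQE_TYPE_SHIFT) |
 *             IONIC_V1_EQE_COLOR)
 * decodes to type IONIC_V1_EQE_TYPE_CQ, code IONIC_V1_EQE_CQ_NOTIFY, qid 5.
 */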
#endif /* _IONIC_FW_H_ */


@@ -15,9 +15,41 @@ MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("NET_IONIC");
static void ionic_init_resids(struct ionic_ibdev *dev)
{
ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
dev->half_cqid_udma_shift =
order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
/* skip reserved lkey */
dev->next_mrkey = 1;
ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
/* skip reserved SMI and GSI qpids */
dev->half_qpid_udma_shift =
order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
}
static void ionic_destroy_resids(struct ionic_ibdev *dev)
{
ionic_resid_destroy(&dev->inuse_cqid);
ionic_resid_destroy(&dev->inuse_pdid);
ionic_resid_destroy(&dev->inuse_ahid);
ionic_resid_destroy(&dev->inuse_mrid);
ionic_resid_destroy(&dev->inuse_qpid);
ionic_resid_destroy(&dev->inuse_dbid);
}
static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
{
ionic_kill_rdma_admin(dev, false);
ib_unregister_device(&dev->ibdev);
ionic_destroy_rdma_admin(dev);
ionic_destroy_resids(dev);
WARN_ON(!xa_empty(&dev->cq_tbl));
xa_destroy(&dev->cq_tbl);
ib_dealloc_device(&dev->ibdev);
}
@@ -34,6 +66,18 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);
xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);
ionic_init_resids(dev);
rc = ionic_rdma_reset_devcmd(dev);
if (rc)
goto err_reset;
rc = ionic_create_rdma_admin(dev);
if (rc)
goto err_admin;
ibdev = &dev->ibdev;
ibdev->dev.parent = dev->lif_cfg.hwdev;
@@ -62,6 +106,11 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
err_register:
err_admin:
ionic_kill_rdma_admin(dev, false);
ionic_destroy_rdma_admin(dev);
err_reset:
ionic_destroy_resids(dev);
xa_destroy(&dev->cq_tbl);
ib_dealloc_device(&dev->ibdev);
return ERR_PTR(rc);
@@ -112,6 +161,10 @@ static int __init ionic_mod_init(void)
{
int rc;
ionic_evt_workq = create_workqueue(DRIVER_NAME "-evt");
if (!ionic_evt_workq)
return -ENOMEM;
rc = auxiliary_driver_register(&ionic_aux_r_driver);
if (rc)
goto err_aux;
@@ -119,12 +172,15 @@ static int __init ionic_mod_init(void)
return 0;
err_aux:
destroy_workqueue(ionic_evt_workq);
return rc;
}
static void __exit ionic_mod_exit(void)
{
auxiliary_driver_unregister(&ionic_aux_r_driver);
destroy_workqueue(ionic_evt_workq);
}
module_init(ionic_mod_init);


@@ -4,15 +4,237 @@
#ifndef _IONIC_IBDEV_H_
#define _IONIC_IBDEV_H_
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <ionic_api.h>
#include <ionic_regs.h>
#include "ionic_fw.h"
#include "ionic_queue.h"
#include "ionic_res.h"
#include "ionic_lif_cfg.h"
/* Config knobs */
#define IONIC_EQ_DEPTH 511
#define IONIC_EQ_COUNT 32
#define IONIC_AQ_DEPTH 63
#define IONIC_AQ_COUNT 4
#define IONIC_EQ_ISR_BUDGET 10
#define IONIC_EQ_WORK_BUDGET 1000
#define IONIC_MAX_PD 1024
#define IONIC_CQ_GRACE 100
struct ionic_aq;
struct ionic_cq;
struct ionic_eq;
struct ionic_vcq;
enum ionic_admin_state {
IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */
IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */
IONIC_ADMIN_KILLED, /* not submitting, locally completed */
};
enum ionic_admin_flags {
IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */
IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */
IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
};
struct ionic_qdesc {
__aligned_u64 addr;
__u32 size;
__u16 mask;
__u8 depth_log2;
__u8 stride_log2;
};
enum ionic_mmap_flag {
IONIC_MMAP_WC = BIT(0),
};
struct ionic_mmap_entry {
struct rdma_user_mmap_entry rdma_entry;
unsigned long size;
unsigned long pfn;
u8 mmap_flags;
};
struct ionic_ibdev {
struct ib_device ibdev;
struct ionic_lif_cfg lif_cfg;
struct xarray qp_tbl;
struct xarray cq_tbl;
struct ionic_resid_bits inuse_dbid;
struct ionic_resid_bits inuse_pdid;
struct ionic_resid_bits inuse_ahid;
struct ionic_resid_bits inuse_mrid;
struct ionic_resid_bits inuse_qpid;
struct ionic_resid_bits inuse_cqid;
u8 half_cqid_udma_shift;
u8 half_qpid_udma_shift;
u8 next_qpid_udma_idx;
u8 next_mrkey;
struct work_struct reset_work;
bool reset_posted;
u32 reset_cnt;
struct delayed_work admin_dwork;
struct ionic_aq **aq_vec;
atomic_t admin_state;
struct ionic_eq **eq_vec;
};
struct ionic_eq {
struct ionic_ibdev *dev;
u32 eqid;
u32 intr;
struct ionic_queue q;
bool armed;
bool enable;
struct work_struct work;
int irq;
char name[32];
};
struct ionic_admin_wr {
struct completion work;
struct list_head aq_ent;
struct ionic_v1_admin_wqe wqe;
struct ionic_v1_cqe cqe;
struct ionic_aq *aq;
int status;
};
struct ionic_admin_wr_q {
struct ionic_admin_wr *wr;
int wqe_strides;
};
struct ionic_aq {
struct ionic_ibdev *dev;
struct ionic_vcq *vcq;
struct work_struct work;
atomic_t admin_state;
unsigned long stamp;
bool armed;
u32 aqid;
u32 cqid;
spinlock_t lock; /* for posting */
struct ionic_queue q;
struct ionic_admin_wr_q *q_wr;
struct list_head wr_prod;
struct list_head wr_post;
};
struct ionic_ctx {
struct ib_ucontext ibctx;
u32 dbid;
struct rdma_user_mmap_entry *mmap_dbell;
};
struct ionic_tbl_buf {
u32 tbl_limit;
u32 tbl_pages;
size_t tbl_size;
__le64 *tbl_buf;
dma_addr_t tbl_dma;
u8 page_size_log2;
};
struct ionic_cq {
struct ionic_vcq *vcq;
u32 cqid;
u32 eqid;
spinlock_t lock; /* for polling */
struct list_head poll_sq;
bool flush;
struct list_head flush_sq;
struct list_head flush_rq;
struct list_head ibkill_flush_ent;
struct ionic_queue q;
bool color;
int credit;
u16 arm_any_prod;
u16 arm_sol_prod;
struct kref cq_kref;
struct completion cq_rel_comp;
/* infrequently accessed, keep at end */
struct ib_umem *umem;
};
struct ionic_vcq {
struct ib_cq ibcq;
struct ionic_cq cq[2];
u8 udma_mask;
u8 poll_idx;
};
static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct ionic_ibdev, ibdev);
}
static inline void ionic_cq_complete(struct kref *kref)
{
struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);
complete(&cq->cq_rel_comp);
}
/* ionic_admin.c */
extern struct workqueue_struct *ionic_evt_workq;
void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr);
int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
enum ionic_admin_flags);
int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev);
int ionic_create_rdma_admin(struct ionic_ibdev *dev);
void ionic_destroy_rdma_admin(struct ionic_ibdev *dev);
void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path);
/* ionic_controlpath.c */
int ionic_create_cq_common(struct ionic_vcq *vcq,
struct ionic_tbl_buf *buf,
const struct ib_cq_init_attr *attr,
struct ionic_ctx *ctx,
struct ib_udata *udata,
struct ionic_qdesc *req_cq,
__u32 *resp_cqid,
int udma_idx);
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
/* ionic_pgtbl.c */
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
int ionic_pgtbl_init(struct ionic_ibdev *dev,
struct ionic_tbl_buf *buf,
struct ib_umem *umem,
dma_addr_t dma,
int limit,
u64 page_size);
void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf);
#endif /* _IONIC_IBDEV_H_ */
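
The admin work-request types and prototypes above (struct ionic_admin_wr, ionic_admin_post(), ionic_admin_wait()) are implemented in the file whose diff was suppressed as too large. A hedged sketch of how a control-path caller would likely drive them; the zero opcode and zero command length are placeholders rather than the real firmware interface, and `dev` is assumed to be a valid struct ionic_ibdev:

struct ionic_admin_wr wr = {
	.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
	.wqe = {
		.op = 0,                 /* placeholder opcode */
		.len = cpu_to_le16(0),   /* placeholder command length */
	},
};
int rc;

ionic_admin_post(dev, &wr);
/* flags = 0: may sleep; see enum ionic_admin_flags for the alternatives */
rc = ionic_admin_wait(dev, &wr, 0);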


@@ -0,0 +1,113 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#include <linux/mman.h>
#include <linux/dma-mapping.h>
#include "ionic_fw.h"
#include "ionic_ibdev.h"
int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
{
if (unlikely(buf->tbl_pages == buf->tbl_limit))
return -ENOMEM;
if (buf->tbl_buf)
buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma);
else
buf->tbl_dma = dma;
++buf->tbl_pages;
return 0;
}
static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev,
struct ionic_tbl_buf *buf)
{
int rc;
buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf);
buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL);
if (!buf->tbl_buf)
return -ENOMEM;
buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf,
buf->tbl_size, DMA_TO_DEVICE);
rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma);
if (rc) {
kfree(buf->tbl_buf);
return rc;
}
return 0;
}
static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem)
{
struct ib_block_iter biter;
u64 page_dma;
int rc;
rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) {
page_dma = rdma_block_iter_dma_address(&biter);
rc = ionic_pgtbl_page(buf, page_dma);
if (rc)
return rc;
}
return 0;
}
void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf)
{
if (buf->tbl_buf)
dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma,
buf->tbl_size, DMA_TO_DEVICE);
kfree(buf->tbl_buf);
memset(buf, 0, sizeof(*buf));
}
int ionic_pgtbl_init(struct ionic_ibdev *dev,
struct ionic_tbl_buf *buf,
struct ib_umem *umem,
dma_addr_t dma,
int limit,
u64 page_size)
{
int rc;
memset(buf, 0, sizeof(*buf));
if (umem) {
limit = ib_umem_num_dma_blocks(umem, page_size);
buf->page_size_log2 = order_base_2(page_size);
}
if (limit < 1)
return -EINVAL;
buf->tbl_limit = limit;
/* skip pgtbl if contiguous / direct translation */
if (limit > 1) {
rc = ionic_tbl_buf_alloc(dev, buf);
if (rc)
return rc;
}
if (umem)
rc = ionic_pgtbl_umem(buf, umem);
else
rc = ionic_pgtbl_page(buf, dma);
if (rc)
goto err_unbuf;
return 0;
err_unbuf:
ionic_pgtbl_unbuf(dev, buf);
return rc;
}


@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#include <linux/dma-mapping.h>
#include "ionic_queue.h"
int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
int depth, size_t stride)
{
if (depth < 0 || depth > 0xffff)
return -EINVAL;
if (stride == 0 || stride > 0x10000)
return -EINVAL;
if (depth == 0)
depth = 1;
q->depth_log2 = order_base_2(depth + 1);
q->stride_log2 = order_base_2(stride);
if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
q->depth_log2 = PAGE_SHIFT - q->stride_log2;
if (q->depth_log2 > 16 || q->stride_log2 > 16)
return -EINVAL;
q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
q->mask = BIT(q->depth_log2) - 1;
q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
if (!q->ptr)
return -ENOMEM;
/* it will always be page aligned, but just to be sure... */
if (!PAGE_ALIGNED(q->ptr)) {
dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
return -ENOMEM;
}
q->prod = 0;
q->cons = 0;
q->dbell = 0;
return 0;
}
void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
{
dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
}


@@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#ifndef _IONIC_QUEUE_H_
#define _IONIC_QUEUE_H_
#include <linux/io.h>
#include <ionic_regs.h>
#define IONIC_MAX_DEPTH 0xffff
#define IONIC_MAX_CQ_DEPTH 0xffff
#define IONIC_CQ_RING_ARM IONIC_DBELL_RING_1
#define IONIC_CQ_RING_SOL IONIC_DBELL_RING_2
/**
* struct ionic_queue - Ring buffer used between device and driver
* @size: Size of the buffer, in bytes
* @dma: Dma address of the buffer
* @ptr: Buffer virtual address
* @prod: Driver position in the queue
* @cons: Device position in the queue
* @mask: Capacity of the queue, subtracting the hole
* This value is equal to ((1 << depth_log2) - 1)
* @depth_log2: Log base two of the queue depth
* @stride_log2: Log base two of the size of a queue element
* @dbell: Doorbell identifying bits
*/
struct ionic_queue {
size_t size;
dma_addr_t dma;
void *ptr;
u16 prod;
u16 cons;
u16 mask;
u8 depth_log2;
u8 stride_log2;
u64 dbell;
};
/**
* ionic_queue_init() - Initialize user space queue
* @q: Uninitialized queue structure
* @dma_dev: DMA device for mapping
* @depth: Depth of the queue
* @stride: Size of each element of the queue
*
* Return: status code
*/
int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
int depth, size_t stride);
/**
* ionic_queue_destroy() - Destroy user space queue
* @q: Queue structure
* @dma_dev: DMA device for mapping
*/
void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
/**
* ionic_queue_empty() - Test if queue is empty
* @q: Queue structure
*
* This is only valid for to-device queues.
*
* Return: is empty
*/
static inline bool ionic_queue_empty(struct ionic_queue *q)
{
return q->prod == q->cons;
}
/**
* ionic_queue_length() - Get the current length of the queue
* @q: Queue structure
*
* This is only valid for to-device queues.
*
* Return: length
*/
static inline u16 ionic_queue_length(struct ionic_queue *q)
{
return (q->prod - q->cons) & q->mask;
}
/**
* ionic_queue_length_remaining() - Get the remaining length of the queue
* @q: Queue structure
*
* This is only valid for to-device queues.
*
* Return: length remaining
*/
static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
{
return q->mask - ionic_queue_length(q);
}
/**
* ionic_queue_full() - Test if queue is full
* @q: Queue structure
*
* This is only valid for to-device queues.
*
* Return: is full
*/
static inline bool ionic_queue_full(struct ionic_queue *q)
{
return q->mask == ionic_queue_length(q);
}
/**
* ionic_color_wrap() - Flip the color if prod is wrapped
* @prod: Queue index just after advancing
* @color: Queue color just prior to advancing the index
*
* Return: color after advancing the index
*/
static inline bool ionic_color_wrap(u16 prod, bool color)
{
/* logical xor color with (prod == 0) */
return color != (prod == 0);
}
/**
* ionic_queue_at() - Get the element at the given index
* @q: Queue structure
* @idx: Index in the queue
*
* The index must be within the bounds of the queue. It is not checked here.
*
* Return: pointer to element at index
*/
static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
{
return q->ptr + ((unsigned long)idx << q->stride_log2);
}
/**
* ionic_queue_at_prod() - Get the element at the producer index
* @q: Queue structure
*
* Return: pointer to element at producer index
*/
static inline void *ionic_queue_at_prod(struct ionic_queue *q)
{
return ionic_queue_at(q, q->prod);
}
/**
* ionic_queue_at_cons() - Get the element at the consumer index
* @q: Queue structure
*
* Return: pointer to element at consumer index
*/
static inline void *ionic_queue_at_cons(struct ionic_queue *q)
{
return ionic_queue_at(q, q->cons);
}
/**
* ionic_queue_next() - Compute the next index
* @q: Queue structure
* @idx: Index
*
* Return: next index after idx
*/
static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
{
return (idx + 1) & q->mask;
}
/**
* ionic_queue_produce() - Increase the producer index
* @q: Queue structure
*
* Caller must ensure that the queue is not full. It is not checked here.
*/
static inline void ionic_queue_produce(struct ionic_queue *q)
{
q->prod = ionic_queue_next(q, q->prod);
}
/**
* ionic_queue_consume() - Increase the consumer index
* @q: Queue structure
*
* Caller must ensure that the queue is not empty. It is not checked here.
*
* This is only valid for to-device queues.
*/
static inline void ionic_queue_consume(struct ionic_queue *q)
{
q->cons = ionic_queue_next(q, q->cons);
}
/**
* ionic_queue_consume_entries() - Increase the consumer index by entries
* @q: Queue structure
* @entries: Number of entries to increment
*
* Caller must ensure that the queue is not empty. It is not checked here.
*
* This is only valid for to-device queues.
*/
static inline void ionic_queue_consume_entries(struct ionic_queue *q,
u16 entries)
{
q->cons = (q->cons + entries) & q->mask;
}
/**
* ionic_queue_dbell_init() - Initialize doorbell bits for queue id
* @q: Queue structure
* @qid: Queue identifying number
*/
static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
{
q->dbell = IONIC_DBELL_QID(qid);
}
/**
* ionic_queue_dbell_val() - Get current doorbell update value
* @q: Queue structure
*
* Return: current doorbell update value
*/
static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
{
return q->dbell | q->prod;
}
#endif /* _IONIC_QUEUE_H_ */
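
A hedged sketch of the produce-then-doorbell pattern these helpers support, using the admin queue `aq` from ionic_ibdev.h as the assumed owner; the doorbell register write itself relies on ionic_regs.h helpers that are not part of this patch:

struct ionic_v1_admin_wqe *wqe;
u64 db;

if (ionic_queue_full(&aq->q))
	return -EAGAIN;                 /* no free slot in the ring */

wqe = ionic_queue_at_prod(&aq->q);      /* next free element */
memset(wqe, 0, 1 << aq->q.stride_log2);
/* ... fill in the work request ... */
ionic_queue_produce(&aq->q);            /* advance the producer index */

db = ionic_queue_dbell_val(&aq->q);     /* queue id bits | producer index */
/* write db to the device doorbell register via ionic_regs.h */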


@@ -0,0 +1,154 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#ifndef _IONIC_RES_H_
#define _IONIC_RES_H_
#include <linux/kernel.h>
#include <linux/idr.h>
/**
* struct ionic_resid_bits - Number allocator based on IDA
*
* @inuse: IDA handle
* @inuse_size: Highest ID limit for IDA
*/
struct ionic_resid_bits {
struct ida inuse;
unsigned int inuse_size;
};
/**
* ionic_resid_init() - Initialize a resid allocator
* @resid: Uninitialized resid allocator
* @size: Capacity of the allocator
*/
static inline void ionic_resid_init(struct ionic_resid_bits *resid,
unsigned int size)
{
resid->inuse_size = size;
ida_init(&resid->inuse);
}
/**
* ionic_resid_destroy() - Destroy a resid allocator
* @resid: Resid allocator
*/
static inline void ionic_resid_destroy(struct ionic_resid_bits *resid)
{
ida_destroy(&resid->inuse);
}
/**
* ionic_resid_get_shared() - Allocate an available shared resource id
* @resid: Resid allocator
* @min: Smallest valid resource id
* @size: One after largest valid resource id
*
* Return: Resource id, or negative error number
*/
static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid,
unsigned int min,
unsigned int size)
{
return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL);
}
/**
* ionic_resid_get() - Allocate an available resource id
* @resid: Resid allocator
*
* Return: Resource id, or negative error number
*/
static inline int ionic_resid_get(struct ionic_resid_bits *resid)
{
return ionic_resid_get_shared(resid, 0, resid->inuse_size);
}
/**
* ionic_resid_put() - Free a resource id
* @resid: Resid allocator
* @id: Resource id
*/
static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id)
{
ida_free(&resid->inuse, id);
}
/**
* ionic_bitid_to_qid() - Transform a resource bit index into a queue id
* @bitid: Bit index
* @qgrp_shift: Log2 number of queues per queue group
* @half_qid_shift: Log2 of half the total number of queues
*
* Return: Queue id
*
* Udma-constrained queues (QPs and CQs) are associated with their udma by
* queue group. Even queue groups are associated with udma0, and odd queue
* groups with udma1.
*
* For allocating queue ids, we want to arrange the bits into two halves,
* with the even queue groups of udma0 in the lower half of the bitset,
* and the odd queue groups of udma1 in the upper half of the bitset.
* Then, one or two calls of find_next_zero_bit can examine all the bits
* for queues of an entire udma.
*
* For example, assuming eight queue groups with qgrp qids per group:
*
* bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1
* bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1
* bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1
* bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1
* bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1
* bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1
* bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1
* bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1
*
* There are three important ranges of bits in the qid. There is the udma
* bit "U" at qgrp_shift, which is the least significant bit of the group
* index, and determines which udma a queue is associated with.
* The bits of lesser significance we can call the idx bits "I", which are
* the index of the queue within the group. The bits of greater significance
* we can call the grp bits "G", which are other bits of the group index that
* do not determine the udma. Those bits are just rearranged in the bit index
* in the bitset. A bitid has the udma bit in the most significant place,
* then the grp bits, then the idx bits.
*
* bitid: 00000000000000 U GGG IIIIII
* qid: 00000000000000 GGG U IIIIII
*
* Transforming from bit index to qid, or from qid to bit index, can be
* accomplished by rearranging the bits by masking and shifting.
*/
static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift,
u8 half_qid_shift)
{
u32 udma_bit =
(bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
u32 idx_bits = bitid & (BIT(qgrp_shift) - 1);
return grp_bits | udma_bit | idx_bits;
}
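/*
 * Worked example with hypothetical sizes: qgrp_shift = 6 and
 * half_qid_shift = 8 describe eight groups of 64 queues shared by two
 * udmas. bitid 256, the first bit of the upper (udma1) half, maps to
 * qid 64, which is queue group 1, an odd group and therefore udma1.
 * ionic_qid_to_bitid() below maps qid 64 back to bitid 256.
 */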
/**
* ionic_qid_to_bitid() - Transform a queue id into a resource bit index
* @qid: queue index
* @qgrp_shift: Log2 number of queues per queue group
* @half_qid_shift: Log2 of half the total number of queues
*
* Return: Resource bit index
*
* This is the inverse of ionic_bitid_to_qid().
*/
static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
{
u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
u32 idx_bits = qid & (BIT(qgrp_shift) - 1);
return udma_bit | grp_bits | idx_bits;
}
#endif /* _IONIC_RES_H_ */
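
Putting the two halves of this header together, a hedged sketch of the udma-aware id allocation performed by ionic_get_cqid()/ionic_put_cqid() in the control-path diff above; `dev` and `udma_idx` are assumed to be in scope and the field names are taken from that diff:

int size, bitid;
u32 cqid;

/* each udma owns one contiguous half of the allocator's bit space */
size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
bitid = ionic_resid_get_shared(&dev->inuse_cqid, size * udma_idx,
			       size * (udma_idx + 1));
if (bitid < 0)
	return bitid;

/* translate the bit index into the device's queue id numbering */
cqid = dev->lif_cfg.cq_base +
	ionic_bitid_to_qid(bitid, dev->lif_cfg.udma_qgrp_shift,
			   dev->half_cqid_udma_shift);

/* ... and reverse the mapping when the id is freed */
ionic_resid_put(&dev->inuse_cqid,
		ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
				   dev->lif_cfg.udma_qgrp_shift,
				   dev->half_cqid_udma_shift));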