RDMA: Handle SRQ allocations by IB/core

Convert SRQ allocation from drivers to be in the IB/core.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
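The conversion is the same in every driver touched below: the SRQ object is no longer allocated by the driver but by the IB/core, which sizes the allocation from the INIT_RDMA_OBJ_SIZE() entry in the device ops. A driver's .create_srq callback now receives a pre-allocated, zeroed struct ib_srq embedded in the driver object and returns an int, and .destroy_srq returns void because the core frees the memory. A minimal sketch of the new callback shape, using a hypothetical "foo" driver structure that is not part of this commit:

```c
/* Sketch only: the "foo" names are hypothetical; the shape follows this commit. */
struct foo_srq {
	struct ib_srq ibsrq;	/* the core-owned object must be embedded */
	/* ... hardware-specific state ... */
};

static int foo_create_srq(struct ib_srq *ibsrq,
			  struct ib_srq_init_attr *attr,
			  struct ib_udata *udata)
{
	struct foo_srq *srq = container_of(ibsrq, struct foo_srq, ibsrq);

	/* ibsrq (and therefore srq) was allocated and zeroed by the IB/core
	 * based on INIT_RDMA_OBJ_SIZE(ib_srq, foo_srq, ibsrq) in the ops.
	 * Only hardware setup remains; on error simply return it and the
	 * core kfree()s the object.
	 */
	return 0;
}

static void foo_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	/* Tear down hardware state only; no kfree() here any more. */
}
```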
This commit is contained in:
	parent d345691471
	commit 68e326dea1

41 changed files with 308 additions and 413 deletions
@@ -2224,6 +2224,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
	SET_OBJ_SIZE(dev_ops, ib_ah);
	SET_OBJ_SIZE(dev_ops, ib_pd);
	SET_OBJ_SIZE(dev_ops, ib_srq);
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
}
EXPORT_SYMBOL(ib_set_device_ops);
@@ -3409,9 +3409,9 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->ops.create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
	srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
	if (!srq) {
		ret = -ENOMEM;
		goto err_put;
	}

@@ -3422,6 +3422,10 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	ret = pd->device->ops.create_srq(srq, &attr, udata);
	if (ret)
		goto err_free;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq       = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);

@@ -3461,6 +3465,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
err_copy:
	ib_destroy_srq_user(srq, &attrs->driver_udata);

err_free:
	kfree(srq);
err_put:
	uobj_put_obj_read(pd);
@@ -964,19 +964,21 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;
	int ret;

	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (!IS_ERR(srq)) {
	srq->device = pd->device;
	srq->pd = pd;
		srq->uobject       = NULL;
	srq->event_handler = srq_init_attr->event_handler;
	srq->srq_context = srq_init_attr->srq_context;
	srq->srq_type = srq_init_attr->srq_type;

	if (ib_srq_has_cq(srq->srq_type)) {
		srq->ext.cq = srq_init_attr->ext.cq;
		atomic_inc(&srq->ext.cq->usecnt);

@@ -986,7 +988,16 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
		atomic_inc(&srq->ext.xrc.xrcd->usecnt);
	}
	atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);

	ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
	if (ret) {
		atomic_dec(&srq->pd->usecnt);
		if (srq->srq_type == IB_SRQT_XRC)
			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
		if (ib_srq_has_cq(srq->srq_type))
			atomic_dec(&srq->ext.cq->usecnt);
		kfree(srq);
		return ERR_PTR(ret);
	}

	return srq;

@@ -1013,32 +1024,19 @@ EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (ib_srq_has_cq(srq_type))
		cq = srq->ext.cq;
	if (srq_type == IB_SRQT_XRC)
		xrcd = srq->ext.xrc.xrcd;
	srq->device->ops.destroy_srq(srq, udata);

	ret = srq->device->ops.destroy_srq(srq, udata);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC)
			atomic_dec(&xrcd->usecnt);
		if (ib_srq_has_cq(srq_type))
			atomic_dec(&cq->usecnt);
	}
	atomic_dec(&srq->pd->usecnt);
	if (srq->srq_type == IB_SRQT_XRC)
		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
	if (ib_srq_has_cq(srq->srq_type))
		atomic_dec(&srq->ext.cq->usecnt);
	kfree(srq);

	return ret;
	return 0;
}
EXPORT_SYMBOL(ib_destroy_srq_user);
@@ -1305,30 +1305,22 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}

/* Shared Receive Queues */
int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;
	int rc;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
		return rc;
	}

	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	if (srq->umem)
		ib_umem_release(srq->umem);
	kfree(srq);
	atomic_dec(&rdev->srq_count);
	if (nq)
		nq->budget--;
	return 0;
}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,

@@ -1362,14 +1354,16 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
	return 0;
}

struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_srq->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_srq *srq;
	struct bnxt_re_srq *srq =
		container_of(ib_srq, struct bnxt_re_srq, ib_srq);
	struct bnxt_qplib_nq *nq = NULL;
	int rc, entries;

@@ -1384,11 +1378,6 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
		goto exit;
	}

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		rc = -ENOMEM;
		goto exit;
	}
	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;

@@ -1434,14 +1423,13 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
		nq->budget++;
	atomic_inc(&rdev->srq_count);

	return &srq->ib_srq;
	return 0;

fail:
	if (srq->umem)
		ib_umem_release(srq->umem);
	kfree(srq);
exit:
	return ERR_PTR(rc);
	return rc;
}

int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
@@ -69,9 +69,9 @@ struct bnxt_re_ah {
};

struct bnxt_re_srq {
	struct ib_srq		ib_srq;
	struct bnxt_re_dev	*rdev;
	u32			srq_limit;
	struct ib_srq		ib_srq;
	struct bnxt_qplib_srq	qplib_srq;
	struct ib_umem		*umem;
	spinlock_t		lock;		/* protect srq */

@@ -170,14 +170,14 @@ int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
void bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
int bnxt_re_create_srq(struct ib_srq *srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
			  const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
@@ -639,6 +639,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
@@ -507,7 +507,7 @@ static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
	writeq(val, db);
}

int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;

@@ -521,14 +521,12 @@ int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;

	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	return 0;
	if (rc)
		return;
	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
@@ -518,7 +518,7 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
@@ -1000,9 +1000,8 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata);
int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *attrs,
void c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
@@ -545,6 +545,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
	.reg_user_mr = c4iw_reg_user_mr,
	.req_notify_cq = c4iw_arm_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
@@ -2683,11 +2683,12 @@ void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
	}
}

struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
			       struct ib_udata *udata)
{
	struct ib_pd *pd = ib_srq->pd;
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq;
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	struct c4iw_pd *php;
	struct c4iw_create_srq_resp uresp;
	struct c4iw_ucontext *ucontext;

@@ -2702,11 +2703,11 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
	rhp = php->rhp;

	if (!rhp->rdev.lldi.vr->srq.size)
		return ERR_PTR(-EINVAL);
		return -EINVAL;
	if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
		return -E2BIG;
	if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
		return ERR_PTR(-E2BIG);
		return -E2BIG;

	/*
	 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.

@@ -2717,15 +2718,9 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!srq->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_srq;
	}
	if (!srq->wr_waitp)
		return -ENOMEM;

	srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
	if (srq->idx < 0) {

@@ -2805,7 +2800,8 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
			(unsigned long)srq->wq.memsize, attrs->attr.max_wr);

	spin_lock_init(&srq->lock);
	return &srq->ibsrq;
	return 0;

err_free_srq_db_key_mm:
	kfree(srq_db_key_mm);
err_free_srq_key_mm:

@@ -2821,12 +2817,10 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
	c4iw_put_wr_wait(srq->wr_waitp);
err_free_srq:
	kfree(srq);
	return ERR_PTR(ret);
	return ret;
}

int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq;

@@ -2844,6 +2838,4 @@ int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
		       srq->wr_waitp);
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
	c4iw_put_wr_wait(srq->wr_waitp);
	kfree(srq);
	return 0;
}
@@ -1142,13 +1142,13 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
@@ -491,6 +491,8 @@ static const struct ib_device_ops hns_roce_dev_frmr_ops = {
static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
@@ -206,13 +206,13 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
	return 0;
}

struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq;
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int srq_desc_size;
	int srq_buf_size;
	u32 page_shift;

@@ -223,11 +223,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	/* Check the actual SRQ wqe and SRQ sge num */
	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

@@ -249,17 +245,13 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	if (udata) {
		struct hns_roce_ib_create_srq  ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			ret = -EFAULT;
			goto err_srq;
		}
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem =
			ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			ret = PTR_ERR(srq->umem);
			goto err_srq;
		}
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		if (hr_dev->caps.srqwqe_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->umem) +

@@ -321,11 +313,9 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	} else {
		page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
				      (1 << page_shift) * 2,
				      &srq->buf, page_shift)) {
			ret = -ENOMEM;
			goto err_srq;
		}
				       (1 << page_shift) * 2, &srq->buf,
				       page_shift))
			return -ENOMEM;

		srq->head = 0;
		srq->tail = srq->max - 1;

@@ -340,7 +330,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
			goto err_srq_mtt;

		page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
		ret = hns_roce_create_idx_que(pd, srq, page_shift);
		ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
		if (ret) {
			dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
				ret);

@@ -372,7 +362,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,

	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0,
	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
				 &srq->mtt, 0, srq);
	if (ret)
		goto err_wrid;

@@ -389,7 +379,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
		}
	}

	return &srq->ibsrq;
	return 0;

err_srqc_alloc:
	hns_roce_srq_free(hr_dev, srq);

@@ -418,12 +408,10 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
	else
		hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

err_srq:
	kfree(srq);
	return ERR_PTR(ret);
	return ret;
}

int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

@@ -440,10 +428,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
		hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
				  &srq->buf);
	}

	kfree(srq);

	return 0;
}

int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
@@ -2561,6 +2561,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
};
@@ -759,13 +759,12 @@ int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
@@ -69,14 +69,14 @@ static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
	}
}

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_srq *srq;
	struct mlx4_ib_srq *srq = to_msrq(ib_srq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;

@@ -89,11 +89,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

@@ -111,16 +107,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    srq->umem->page_shift, &srq->mtt);

@@ -137,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			goto err_srq;
			return err;

		*srq->db.db = 0;

@@ -184,8 +176,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
			     &srq->mtt, srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

@@ -200,7 +192,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;
	return 0;

err_wrid:
	if (udata)

@@ -221,10 +213,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
	return err;
}

int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,

@@ -271,7 +260,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
	return 0;
}

int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

@@ -293,10 +282,6 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}

void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
@@ -4795,19 +4795,21 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
	attr.ext.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
	if (!devr->s0) {
		ret = -ENOMEM;
		goto error4;
	}

	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject       = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context   = NULL;
	devr->s0->srq_type      = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.cq	= devr->c0;
	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
	if (ret)
		goto err_create;

	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);

@@ -4817,18 +4819,21 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
	devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
	if (!devr->s1) {
		ret = -ENOMEM;
		goto error5;
	}

	devr->s1->device	= &dev->ib_dev;
	devr->s1->pd		= devr->p0;
	devr->s1->uobject       = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context   = NULL;
	devr->s1->srq_type      = IB_SRQT_BASIC;
	devr->s1->ext.cq	= devr->c0;

	ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
	if (ret)
		goto error6;

	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

@@ -4840,8 +4845,12 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)

	return 0;

error6:
	kfree(devr->s1);
error5:
	mlx5_ib_destroy_srq(devr->s0, NULL);
err_create:
	kfree(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
error3:

@@ -4862,7 +4871,9 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
	int port;

	mlx5_ib_destroy_srq(devr->s1, NULL);
	kfree(devr->s1);
	mlx5_ib_destroy_srq(devr->s0, NULL);
	kfree(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
	mlx5_ib_destroy_cq(devr->c0, NULL);

@@ -5989,6 +6000,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};
@@ -1058,13 +1058,12 @@ int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
| 
						 | 
				
			
			@ -214,16 +214,16 @@ static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
 | 
			
		|||
	mlx5_db_free(dev->mdev, &srq->db);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 | 
			
		||||
int mlx5_ib_create_srq(struct ib_srq *ib_srq,
 | 
			
		||||
		       struct ib_srq_init_attr *init_attr,
 | 
			
		||||
		       struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 | 
			
		||||
	struct mlx5_ib_srq *srq;
 | 
			
		||||
	struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
 | 
			
		||||
	struct mlx5_ib_srq *srq = to_msrq(ib_srq);
 | 
			
		||||
	size_t desc_size;
 | 
			
		||||
	size_t buf_size;
 | 
			
		||||
	int err;
 | 
			
		||||
	struct mlx5_srq_attr in = {0};
 | 
			
		||||
	struct mlx5_srq_attr in = {};
 | 
			
		||||
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 | 
			
		||||
 | 
			
		||||
	/* Sanity check SRQ size before proceeding */
 | 
			
		||||
| 
						 | 
				
			
			@ -231,13 +231,9 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 | 
			
		|||
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
 | 
			
		||||
			    init_attr->attr.max_wr,
 | 
			
		||||
			    max_srq_wqes);
 | 
			
		||||
		return ERR_PTR(-EINVAL);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
 | 
			
		||||
	if (!srq)
 | 
			
		||||
		return ERR_PTR(-ENOMEM);
 | 
			
		||||
 | 
			
		||||
	mutex_init(&srq->mutex);
 | 
			
		||||
	spin_lock_init(&srq->lock);
 | 
			
		||||
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
 | 
			
		||||
| 
						 | 
				
			
			@ -245,35 +241,32 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 | 
			
		|||
 | 
			
		||||
	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 | 
			
		||||
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
 | 
			
		||||
	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
 | 
			
		||||
		err = -EINVAL;
 | 
			
		||||
		goto err_srq;
 | 
			
		||||
	}
 | 
			
		||||
	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	desc_size = roundup_pow_of_two(desc_size);
 | 
			
		||||
	desc_size = max_t(size_t, 32, desc_size);
 | 
			
		||||
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
 | 
			
		||||
		err = -EINVAL;
 | 
			
		||||
		goto err_srq;
 | 
			
		||||
	}
 | 
			
		||||
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 | 
			
		||||
		sizeof(struct mlx5_wqe_data_seg);
 | 
			
		||||
	srq->msrq.wqe_shift = ilog2(desc_size);
 | 
			
		||||
	buf_size = srq->msrq.max * desc_size;
 | 
			
		||||
	if (buf_size < desc_size) {
 | 
			
		||||
		err = -EINVAL;
 | 
			
		||||
		goto err_srq;
 | 
			
		||||
	}
 | 
			
		||||
	if (buf_size < desc_size)
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	in.type = init_attr->srq_type;
 | 
			
		||||
 | 
			
		||||
	if (udata)
 | 
			
		||||
		err = create_srq_user(pd, srq, &in, udata, buf_size);
 | 
			
		||||
		err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
 | 
			
		||||
	else
 | 
			
		||||
		err = create_srq_kernel(dev, srq, &in, buf_size);
 | 
			
		||||
 | 
			
		||||
	if (err) {
 | 
			
		||||
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
 | 
			
		||||
			     udata ? "user" : "kernel", err);
 | 
			
		||||
		goto err_srq;
 | 
			
		||||
		return err;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	in.log_size = ilog2(srq->msrq.max);
 | 
			
		||||
| 
						 | 
				
			
			@ -303,7 +296,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 | 
			
		|||
	else
 | 
			
		||||
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
 | 
			
		||||
 | 
			
		||||
	in.pd = to_mpd(pd)->pdn;
 | 
			
		||||
	in.pd = to_mpd(ib_srq->pd)->pdn;
 | 
			
		||||
	in.db_record = srq->db.dma;
 | 
			
		||||
	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
 | 
			
		||||
	kvfree(in.pas);
 | 
			
		||||
| 
						 | 
				
			
			@ -326,21 +319,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 | 
			
		|||
 | 
			
		||||
	init_attr->attr.max_wr = srq->msrq.max - 1;
 | 
			
		||||
 | 
			
		||||
	return &srq->ibsrq;
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
err_core:
 | 
			
		||||
	mlx5_cmd_destroy_srq(dev, &srq->msrq);
 | 
			
		||||
 | 
			
		||||
err_usr_kern_srq:
 | 
			
		||||
	if (udata)
 | 
			
		||||
		destroy_srq_user(pd, srq, udata);
 | 
			
		||||
		destroy_srq_user(ib_srq->pd, srq, udata);
 | 
			
		||||
	else
 | 
			
		||||
		destroy_srq_kernel(dev, srq);
 | 
			
		||||
 | 
			
		||||
err_srq:
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
 | 
			
		||||
	return ERR_PTR(err);
 | 
			
		||||
	return err;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 | 
			
		||||
| 
						 | 
				
			
			@ -393,7 +383,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 | 
			
		|||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 | 
			
		||||
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
 | 
			
		||||
	struct mlx5_ib_srq *msrq = to_msrq(srq);
 | 
			
		||||
| 
						 | 
				
			
			@ -411,9 +401,6 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 | 
			
		|||
	} else {
 | 
			
		||||
		destroy_srq_kernel(dev, msrq);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -56,7 +56,7 @@ struct mlx5_srq_table {

int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in);
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out);
int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -607,7 +607,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
	return err;
}

int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;

@@ -615,16 +615,14 @@ int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)

	tmp = xa_erase_irq(&table->array, srq->srqn);
	if (!tmp || tmp != srq)
		return -EINVAL;
		return;

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;
		return;

	mlx5_core_res_put(&srq->common);
	wait_for_completion(&srq->common.free);

	return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -403,65 +403,53 @@ static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
static int mthca_create_srq(struct ib_srq *ibsrq,
			    struct ib_srq_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_srq *srq;
	struct mthca_srq *srq = to_msrq(ibsrq);
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-EOPNOTSUPP);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);
		return -EOPNOTSUPP;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
		err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			goto err_free;
			return err;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
	err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
			      &init_attr->attr, srq, udata);

	if (err && udata)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
		mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;
		return err;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
		mthca_free_srq(to_mdev(ibsrq->device), srq);
		return -EFAULT;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
	return 0;
}

static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =

@@ -475,9 +463,6 @@ static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	kfree(srq);

	return 0;
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,

@@ -1210,6 +1195,8 @@ static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_arbel_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {

@@ -1218,6 +1205,8 @@ static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_tavor_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
@@ -2863,21 +2863,19 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
	return status;
}

int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_srq *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
		return;
	cmd->id = srq->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (srq->rq.va)
		dma_free_coherent(&pdev->dev, srq->rq.len,
				  srq->rq.va, srq->rq.pa);
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
@@ -137,7 +137,7 @@ int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
			  struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq);

int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
@@ -191,6 +191,8 @@ static const struct ib_device_ops ocrdma_dev_srq_ops = {
	.modify_srq = ocrdma_modify_srq,
	.post_srq_recv = ocrdma_post_srq_recv,
	.query_srq = ocrdma_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
};

static int ocrdma_register_device(struct ocrdma_dev *dev)
@@ -1805,45 +1805,43 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
	return status;
}

struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_srq *srq;
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
	struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
		return -EINVAL;
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);
		return -EINVAL;

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		goto err;
		return status;

	if (udata == NULL) {
		status = -ENOMEM;
	if (!udata) {
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL)
		if (!srq->rqe_wr_id_tbl) {
			status = -ENOMEM;
			goto arm_err;
		}

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
				  GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
		if (!srq->idx_bit_fields) {
			status = -ENOMEM;
			goto arm_err;
		}
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

@@ -1860,15 +1858,13 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
			goto arm_err;
	}

	return &srq->ibsrq;
	return 0;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
	return status;
}

int ocrdma_modify_srq(struct ib_srq *ibsrq,

@@ -1897,15 +1893,14 @@ int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
	return status;
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);

	status = ocrdma_mbx_destroy_srq(dev, srq);
	ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,

@@ -1913,8 +1908,6 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */
			@ -91,12 +91,12 @@ int ocrdma_query_qp(struct ib_qp *,
 | 
			
		|||
int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 | 
			
		||||
void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
 | 
			
		||||
 | 
			
		||||
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
 | 
			
		||||
				 struct ib_udata *);
 | 
			
		||||
int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr,
 | 
			
		||||
		      struct ib_udata *udata);
 | 
			
		||||
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
 | 
			
		||||
		      enum ib_srq_attr_mask, struct ib_udata *);
 | 
			
		||||
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
 | 
			
		||||
int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 | 
			
		||||
void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 | 
			
		||||
int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
 | 
			
		||||
			 const struct ib_recv_wr **bad_recv_wr);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -241,6 +241,7 @@ static const struct ib_device_ops qedr_dev_ops = {
 | 
			
		|||
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1267,7 +1267,7 @@ static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
 | 
			
		||||
static int qedr_check_srq_params(struct qedr_dev *dev,
 | 
			
		||||
				 struct ib_srq_init_attr *attrs,
 | 
			
		||||
				 struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			@ -1383,33 +1383,28 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
 | 
			
		|||
	return rc;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 | 
			
		||||
			       struct ib_srq_init_attr *init_attr,
 | 
			
		||||
int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 | 
			
		||||
		    struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct qed_rdma_destroy_srq_in_params destroy_in_params;
 | 
			
		||||
	struct qed_rdma_create_srq_in_params in_params = {};
 | 
			
		||||
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
 | 
			
		||||
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
 | 
			
		||||
	struct qed_rdma_create_srq_out_params out_params;
 | 
			
		||||
	struct qedr_pd *pd = get_qedr_pd(ibpd);
 | 
			
		||||
	struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
 | 
			
		||||
	struct qedr_create_srq_ureq ureq = {};
 | 
			
		||||
	u64 pbl_base_addr, phy_prod_pair_addr;
 | 
			
		||||
	struct qedr_srq_hwq_info *hw_srq;
 | 
			
		||||
	u32 page_cnt, page_size;
 | 
			
		||||
	struct qedr_srq *srq;
 | 
			
		||||
	struct qedr_srq *srq = get_qedr_srq(ibsrq);
 | 
			
		||||
	int rc = 0;
 | 
			
		||||
 | 
			
		||||
	DP_DEBUG(dev, QEDR_MSG_QP,
 | 
			
		||||
		 "create SRQ called from %s (pd %p)\n",
 | 
			
		||||
		 (udata) ? "User lib" : "kernel", pd);
 | 
			
		||||
 | 
			
		||||
	rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
 | 
			
		||||
	rc = qedr_check_srq_params(dev, init_attr, udata);
 | 
			
		||||
	if (rc)
 | 
			
		||||
		return ERR_PTR(-EINVAL);
 | 
			
		||||
 | 
			
		||||
	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
 | 
			
		||||
	if (!srq)
 | 
			
		||||
		return ERR_PTR(-ENOMEM);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	srq->dev = dev;
 | 
			
		||||
	hw_srq = &srq->hw_srq;
 | 
			
		||||
| 
						 | 
				
			
			@ -1471,7 +1466,7 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
 | 
			
		||||
	DP_DEBUG(dev, QEDR_MSG_SRQ,
 | 
			
		||||
		 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
 | 
			
		||||
	return &srq->ibsrq;
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
err2:
 | 
			
		||||
	destroy_in_params.srq_id = srq->srq_id;
 | 
			
		||||
| 
						 | 
				
			
			@ -1483,12 +1478,10 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
	else
 | 
			
		||||
		qedr_free_srq_kernel_params(srq);
 | 
			
		||||
err0:
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
 | 
			
		||||
	return ERR_PTR(-EFAULT);
 | 
			
		||||
	return -EFAULT;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		||||
void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct qed_rdma_destroy_srq_in_params in_params = {};
 | 
			
		||||
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
 | 
			
		||||
| 
						 | 
				
			
			@ -1506,9 +1499,6 @@ int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		|||
	DP_DEBUG(dev, QEDR_MSG_SRQ,
 | 
			
		||||
		 "destroy srq: destroyed srq with srq_id=0x%0x\n",
 | 
			
		||||
		 srq->srq_id);
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -64,13 +64,12 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
 | 
			
		|||
		  int qp_attr_mask, struct ib_qp_init_attr *);
 | 
			
		||||
int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 | 
			
		||||
 | 
			
		||||
struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 | 
			
		||||
			       struct ib_srq_init_attr *attr,
 | 
			
		||||
int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
 | 
			
		||||
		    struct ib_udata *udata);
 | 
			
		||||
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 | 
			
		||||
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 | 
			
		||||
int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
 | 
			
		||||
int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 | 
			
		||||
void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 | 
			
		||||
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 | 
			
		||||
		       const struct ib_recv_wr **bad_recv_wr);
 | 
			
		||||
int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -206,6 +206,8 @@ static const struct ib_device_ops pvrdma_dev_srq_ops = {
 | 
			
		|||
	.destroy_srq = pvrdma_destroy_srq,
 | 
			
		||||
	.modify_srq = pvrdma_modify_srq,
 | 
			
		||||
	.query_srq = pvrdma_query_srq,
 | 
			
		||||
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
static int pvrdma_register_device(struct pvrdma_dev *dev)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -94,19 +94,18 @@ int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 | 
			
		|||
 * @init_attr: shared receive queue attributes
 | 
			
		||||
 * @udata: user data
 | 
			
		||||
 *
 | 
			
		||||
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 | 
			
		||||
 * @return: 0 on success, otherwise returns an errno.
 | 
			
		||||
 */
 | 
			
		||||
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 | 
			
		||||
				 struct ib_srq_init_attr *init_attr,
 | 
			
		||||
int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 | 
			
		||||
		      struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct pvrdma_srq *srq = NULL;
 | 
			
		||||
	struct pvrdma_dev *dev = to_vdev(pd->device);
 | 
			
		||||
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
 | 
			
		||||
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
 | 
			
		||||
	union pvrdma_cmd_req req;
 | 
			
		||||
	union pvrdma_cmd_resp rsp;
 | 
			
		||||
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
 | 
			
		||||
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
 | 
			
		||||
	struct pvrdma_create_srq_resp srq_resp = {0};
 | 
			
		||||
	struct pvrdma_create_srq_resp srq_resp = {};
 | 
			
		||||
	struct pvrdma_create_srq ucmd;
 | 
			
		||||
	unsigned long flags;
 | 
			
		||||
	int ret;
 | 
			
		||||
| 
						 | 
				
			
			@ -115,31 +114,25 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 | 
			
		|||
		/* No support for kernel clients. */
 | 
			
		||||
		dev_warn(&dev->pdev->dev,
 | 
			
		||||
			 "no shared receive queue support for kernel client\n");
 | 
			
		||||
		return ERR_PTR(-EOPNOTSUPP);
 | 
			
		||||
		return -EOPNOTSUPP;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (init_attr->srq_type != IB_SRQT_BASIC) {
 | 
			
		||||
		dev_warn(&dev->pdev->dev,
 | 
			
		||||
			 "shared receive queue type %d not supported\n",
 | 
			
		||||
			 init_attr->srq_type);
 | 
			
		||||
		return ERR_PTR(-EINVAL);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (init_attr->attr.max_wr  > dev->dsr->caps.max_srq_wr ||
 | 
			
		||||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
 | 
			
		||||
		dev_warn(&dev->pdev->dev,
 | 
			
		||||
			 "shared receive queue size invalid\n");
 | 
			
		||||
		return ERR_PTR(-EINVAL);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
 | 
			
		||||
		return ERR_PTR(-ENOMEM);
 | 
			
		||||
 | 
			
		||||
	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
 | 
			
		||||
	if (!srq) {
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
		goto err_srq;
 | 
			
		||||
	}
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
	spin_lock_init(&srq->lock);
 | 
			
		||||
	refcount_set(&srq->refcnt, 1);
 | 
			
		||||
| 
						 | 
				
			
			@ -181,7 +174,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 | 
			
		|||
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
 | 
			
		||||
	cmd->srq_type = init_attr->srq_type;
 | 
			
		||||
	cmd->nchunks = srq->npages;
 | 
			
		||||
	cmd->pd_handle = to_vpd(pd)->pd_handle;
 | 
			
		||||
	cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
 | 
			
		||||
	cmd->attrs.max_wr = init_attr->attr.max_wr;
 | 
			
		||||
	cmd->attrs.max_sge = init_attr->attr.max_sge;
 | 
			
		||||
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
 | 
			
		||||
| 
						 | 
				
			
			@ -205,20 +198,19 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 | 
			
		|||
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
 | 
			
		||||
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
 | 
			
		||||
		pvrdma_destroy_srq(&srq->ibsrq, udata);
 | 
			
		||||
		return ERR_PTR(-EINVAL);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &srq->ibsrq;
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
err_page_dir:
 | 
			
		||||
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
 | 
			
		||||
err_umem:
 | 
			
		||||
	ib_umem_release(srq->umem);
 | 
			
		||||
err_srq:
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
	atomic_dec(&dev->num_srqs);
 | 
			
		||||
 | 
			
		||||
	return ERR_PTR(ret);
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 | 
			
		||||
| 
						 | 
				
			
			@ -250,7 +242,7 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 | 
			
		|||
 *
 | 
			
		||||
 * @return: 0 for success.
 | 
			
		||||
 */
 | 
			
		||||
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 | 
			
		||||
void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct pvrdma_srq *vsrq = to_vsrq(srq);
 | 
			
		||||
	union pvrdma_cmd_req req;
 | 
			
		||||
| 
						 | 
				
			
			@ -269,8 +261,6 @@ int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 | 
			
		|||
			 ret);
 | 
			
		||||
 | 
			
		||||
	pvrdma_free_srq(dev, vsrq);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -419,13 +419,12 @@ int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
 | 
			
		|||
		     struct ib_udata *udata);
 | 
			
		||||
void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
 | 
			
		||||
 | 
			
		||||
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 | 
			
		||||
				 struct ib_srq_init_attr *init_attr,
 | 
			
		||||
int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
 | 
			
		||||
		      struct ib_udata *udata);
 | 
			
		||||
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 | 
			
		||||
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 | 
			
		||||
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
 | 
			
		||||
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 | 
			
		||||
void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 | 
			
		||||
 | 
			
		||||
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 | 
			
		||||
			       struct ib_qp_init_attr *init_attr,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -71,29 +71,24 @@ void rvt_driver_srq_init(struct rvt_dev_info *rdi)
 | 
			
		|||
 * @srq_init_attr: the attributes of the SRQ
 | 
			
		||||
 * @udata: data from libibverbs when creating a user SRQ
 | 
			
		||||
 *
 | 
			
		||||
 * Return: Allocated srq object
 | 
			
		||||
 * Return: 0 on success
 | 
			
		||||
 */
 | 
			
		||||
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 | 
			
		||||
			      struct ib_srq_init_attr *srq_init_attr,
 | 
			
		||||
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
 | 
			
		||||
		   struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
 | 
			
		||||
	struct rvt_srq *srq;
 | 
			
		||||
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
 | 
			
		||||
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
 | 
			
		||||
	u32 sz;
 | 
			
		||||
	struct ib_srq *ret;
 | 
			
		||||
	int ret;
 | 
			
		||||
 | 
			
		||||
	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
 | 
			
		||||
		return ERR_PTR(-EOPNOTSUPP);
 | 
			
		||||
		return -EOPNOTSUPP;
 | 
			
		||||
 | 
			
		||||
	if (srq_init_attr->attr.max_sge == 0 ||
 | 
			
		||||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
 | 
			
		||||
	    srq_init_attr->attr.max_wr == 0 ||
 | 
			
		||||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
 | 
			
		||||
		return ERR_PTR(-EINVAL);
 | 
			
		||||
 | 
			
		||||
	srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
 | 
			
		||||
	if (!srq)
 | 
			
		||||
		return ERR_PTR(-ENOMEM);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * Need to use vmalloc() if we want to support large #s of entries.
 | 
			
		||||
| 
						 | 
				
			
			@ -107,7 +102,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
		vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
 | 
			
		||||
			     dev->dparms.node);
 | 
			
		||||
	if (!srq->rq.wq) {
 | 
			
		||||
		ret = ERR_PTR(-ENOMEM);
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
		goto bail_srq;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -116,22 +111,19 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
	 * See rvt_mmap() for details.
 | 
			
		||||
	 */
 | 
			
		||||
	if (udata && udata->outlen >= sizeof(__u64)) {
 | 
			
		||||
		int err;
 | 
			
		||||
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
 | 
			
		||||
 | 
			
		||||
		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
 | 
			
		||||
		if (!srq->ip) {
 | 
			
		||||
			ret = ERR_PTR(-ENOMEM);
 | 
			
		||||
			ret = -ENOMEM;
 | 
			
		||||
			goto bail_wq;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		err = ib_copy_to_udata(udata, &srq->ip->offset,
 | 
			
		||||
		ret = ib_copy_to_udata(udata, &srq->ip->offset,
 | 
			
		||||
				       sizeof(srq->ip->offset));
 | 
			
		||||
		if (err) {
 | 
			
		||||
			ret = ERR_PTR(err);
 | 
			
		||||
		if (ret)
 | 
			
		||||
			goto bail_ip;
 | 
			
		||||
	}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * ib_create_srq() will initialize srq->ibsrq.
 | 
			
		||||
| 
						 | 
				
			
			@ -142,7 +134,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
	spin_lock(&dev->n_srqs_lock);
 | 
			
		||||
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
 | 
			
		||||
		spin_unlock(&dev->n_srqs_lock);
 | 
			
		||||
		ret = ERR_PTR(-ENOMEM);
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
		goto bail_ip;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -155,14 +147,13 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
		spin_unlock_irq(&dev->pending_lock);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &srq->ibsrq;
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
bail_ip:
 | 
			
		||||
	kfree(srq->ip);
 | 
			
		||||
bail_wq:
 | 
			
		||||
	vfree(srq->rq.wq);
 | 
			
		||||
bail_srq:
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -334,9 +325,8 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 | 
			
		|||
 * rvt_destroy_srq - destory an srq
 | 
			
		||||
 * @ibsrq: srq object to destroy
 | 
			
		||||
 *
 | 
			
		||||
 * Return always 0
 | 
			
		||||
 */
 | 
			
		||||
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		||||
void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
 | 
			
		||||
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
 | 
			
		||||
| 
						 | 
				
			
			@ -348,7 +338,4 @@ int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		|||
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
 | 
			
		||||
	else
 | 
			
		||||
		vfree(srq->rq.wq);
 | 
			
		||||
	kfree(srq);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -50,13 +50,12 @@
 | 
			
		|||
 | 
			
		||||
#include <rdma/rdma_vt.h>
 | 
			
		||||
void rvt_driver_srq_init(struct rvt_dev_info *rdi);
 | 
			
		||||
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
 | 
			
		||||
			      struct ib_srq_init_attr *srq_init_attr,
 | 
			
		||||
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
 | 
			
		||||
		   struct ib_udata *udata);
 | 
			
		||||
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 | 
			
		||||
		   enum ib_srq_attr_mask attr_mask,
 | 
			
		||||
		   struct ib_udata *udata);
 | 
			
		||||
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
 | 
			
		||||
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 | 
			
		||||
void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 | 
			
		||||
 | 
			
		||||
#endif          /* DEF_RVTSRQ_H */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -428,6 +428,7 @@ static const struct ib_device_ops rvt_dev_ops = {
 | 
			
		|||
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -57,7 +57,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 | 
			
		|||
	[RXE_TYPE_SRQ] = {
 | 
			
		||||
		.name		= "rxe-srq",
 | 
			
		||||
		.size		= sizeof(struct rxe_srq),
 | 
			
		||||
		.flags		= RXE_POOL_INDEX,
 | 
			
		||||
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 | 
			
		||||
		.min_index	= RXE_MIN_SRQ_INDEX,
 | 
			
		||||
		.max_index	= RXE_MAX_SRQ_INDEX,
 | 
			
		||||
	},
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -289,19 +289,18 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 | 
			
		|||
	return err;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
 | 
			
		||||
				     struct ib_srq_init_attr *init,
 | 
			
		||||
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 | 
			
		||||
			  struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	int err;
 | 
			
		||||
	struct rxe_dev *rxe = to_rdev(ibpd->device);
 | 
			
		||||
	struct rxe_pd *pd = to_rpd(ibpd);
 | 
			
		||||
	struct rxe_srq *srq;
 | 
			
		||||
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
 | 
			
		||||
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
 | 
			
		||||
	struct rxe_srq *srq = to_rsrq(ibsrq);
 | 
			
		||||
	struct rxe_create_srq_resp __user *uresp = NULL;
 | 
			
		||||
 | 
			
		||||
	if (udata) {
 | 
			
		||||
		if (udata->outlen < sizeof(*uresp))
 | 
			
		||||
			return ERR_PTR(-EINVAL);
 | 
			
		||||
			return -EINVAL;
 | 
			
		||||
		uresp = udata->outbuf;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -309,13 +308,10 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
	if (err)
 | 
			
		||||
		goto err1;
 | 
			
		||||
 | 
			
		||||
	srq = rxe_alloc(&rxe->srq_pool);
 | 
			
		||||
	if (!srq) {
 | 
			
		||||
		err = -ENOMEM;
 | 
			
		||||
	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
 | 
			
		||||
	if (err)
 | 
			
		||||
		goto err1;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rxe_add_index(srq);
 | 
			
		||||
	rxe_add_ref(pd);
 | 
			
		||||
	srq->pd = pd;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -323,14 +319,13 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
 | 
			
		|||
	if (err)
 | 
			
		||||
		goto err2;
 | 
			
		||||
 | 
			
		||||
	return &srq->ibsrq;
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
err2:
 | 
			
		||||
	rxe_drop_ref(pd);
 | 
			
		||||
	rxe_drop_index(srq);
 | 
			
		||||
	rxe_drop_ref(srq);
 | 
			
		||||
err1:
 | 
			
		||||
	return ERR_PTR(err);
 | 
			
		||||
	return err;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 | 
			
		||||
| 
						 | 
				
			
			@ -378,7 +373,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 | 
			
		|||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		||||
static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		||||
{
 | 
			
		||||
	struct rxe_srq *srq = to_rsrq(ibsrq);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -386,10 +381,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 | 
			
		|||
		rxe_queue_cleanup(srq->rq.queue);
 | 
			
		||||
 | 
			
		||||
	rxe_drop_ref(srq->pd);
 | 
			
		||||
	rxe_drop_index(srq);
 | 
			
		||||
	rxe_drop_ref(srq);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 | 
			
		||||
| 
						 | 
				
			
			@ -1166,6 +1158,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 | 
			
		|||
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
 | 
			
		||||
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -120,8 +120,8 @@ struct rxe_rq {
 | 
			
		|||
};
 | 
			
		||||
 | 
			
		||||
struct rxe_srq {
 | 
			
		||||
	struct rxe_pool_entry	pelem;
 | 
			
		||||
	struct ib_srq		ibsrq;
 | 
			
		||||
	struct rxe_pool_entry	pelem;
 | 
			
		||||
	struct rxe_pd		*pd;
 | 
			
		||||
	struct rxe_rq		rq;
 | 
			
		||||
	u32			srq_num;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2406,14 +2406,14 @@ struct ib_device_ops {
 | 
			
		|||
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 | 
			
		||||
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 | 
			
		||||
	void (*destroy_ah)(struct ib_ah *ah, u32 flags);
 | 
			
		||||
	struct ib_srq *(*create_srq)(struct ib_pd *pd,
 | 
			
		||||
	int (*create_srq)(struct ib_srq *srq,
 | 
			
		||||
			  struct ib_srq_init_attr *srq_init_attr,
 | 
			
		||||
			  struct ib_udata *udata);
 | 
			
		||||
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
 | 
			
		||||
			  enum ib_srq_attr_mask srq_attr_mask,
 | 
			
		||||
			  struct ib_udata *udata);
 | 
			
		||||
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
 | 
			
		||||
	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
 | 
			
		||||
	void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
 | 
			
		||||
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
 | 
			
		||||
				   struct ib_qp_init_attr *qp_init_attr,
 | 
			
		||||
				   struct ib_udata *udata);
 | 
			
		||||
| 
						 | 
				
			
			@ -2553,6 +2553,7 @@ struct ib_device_ops {
 | 
			
		|||
 | 
			
		||||
	DECLARE_RDMA_OBJ_SIZE(ib_ah);
 | 
			
		||||
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
 | 
			
		||||
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
 | 
			
		||||
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
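The ops-table hunks above mean a driver no longer allocates or frees its SRQ object: ib_core allocates a zeroed structure of the size declared with INIT_RDMA_OBJ_SIZE, fills in the common ib_srq fields, calls the driver's create_srq, and frees the object itself after destroy_srq returns. Below is a minimal sketch of a driver written against the new callbacks; the "foo" names are hypothetical and are not part of this commit.

#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical driver-private SRQ. ib_core allocates it based on the
 * size declared in the ops table below, with the ib_srq embedded.
 */
struct foo_srq {
	struct ib_srq ibsrq;
	spinlock_t lock;
};

static int foo_create_srq(struct ib_srq *ibsrq,
			  struct ib_srq_init_attr *init_attr,
			  struct ib_udata *udata)
{
	/* ibsrq->device, ibsrq->pd, srq_type, etc. are already set by ib_core */
	struct foo_srq *srq = container_of(ibsrq, struct foo_srq, ibsrq);

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	spin_lock_init(&srq->lock);
	/* set up hardware or software receive queues here */
	return 0;
}

static void foo_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	/* release driver resources only; ib_core frees the ib_srq memory */
}

static const struct ib_device_ops foo_dev_ops = {
	.create_srq = foo_create_srq,
	.destroy_srq = foo_destroy_srq,
	INIT_RDMA_OBJ_SIZE(ib_srq, foo_srq, ibsrq),
};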