forked from mirrors/linux
		
	IB/mlx5: Fix micro UAR allocator
The micro UAR (uuar) allocator had a bug which resulted from the fact that in each UAR we only have two micro UARs available, those at index 0 and 1. This patch defines iterators to aid in traversing the list of available micro UARs when allocating a uuar. In addition, change the logic in create_user_qp() so that if high class allocation fails (high class means lower latency), we revert to medium class and not to the low class.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 24e42754f6
commit c1be5232d2

3 changed files with 70 additions and 27 deletions
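For orientation before the diff (an editorial sketch, not part of the patch): each UAR page exposes MLX5_BF_REGS_PER_PAGE = 4 Blue Flame registers, but slots 2 and 3 of every page are reserved for the kernel fast path, so only slots 0 and 1 can be handed out as micro UARs. The helper name below is ours, purely for illustration:

/* Editorial sketch: a uuar index's class is determined by its slot
 * within its UAR page. Slots 2 and 3 are fast path and are pre-marked
 * busy in the allocator bitmap; slots 0 and 1 are the two usable
 * micro UARs per page that the commit message refers to.
 */
static int is_fast_path_slot(int uuarn)
{
        return (uuarn & 3) == 2 || (uuarn & 3) == 3;
}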
				
			
@@ -541,6 +541,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
+	int gross_uuars;
 	int num_uars;
 	int uuarn;
 	int err;
@@ -559,11 +560,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (req.total_num_uuars == 0)
 		return ERR_PTR(-EINVAL);
 
-	req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
+	req.total_num_uuars = ALIGN(req.total_num_uuars,
+				    MLX5_NON_FP_BF_REGS_PER_PAGE);
 	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
 		return ERR_PTR(-EINVAL);
 
-	num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
+	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
+	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
 	resp.qp_tab_size      = 1 << dev->mdev.caps.log_max_qp;
 	resp.bf_reg_size      = dev->mdev.caps.bf_reg_size;
 	resp.cache_line_size  = L1_CACHE_BYTES;
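To make the arithmetic concrete (an editorial example): if userspace requests 8 uuars, ALIGN leaves total_num_uuars at 8 (already a multiple of MLX5_NON_FP_BF_REGS_PER_PAGE = 2), num_uars = 8 / 2 = 4 UAR pages are mapped, and gross_uuars = 4 * MLX5_BF_REGS_PER_PAGE = 16 bitmap slots are tracked, of which the 8 fast-path slots get pre-set as busy. Sizing the bitmap and count arrays by total_num_uuars instead of gross_uuars, as the old code below did, undercounted the slots the allocator actually indexes.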
@@ -585,7 +588,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		goto out_ctx;
 	}
 
-	uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
+	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
 				sizeof(*uuari->bitmap),
 				GFP_KERNEL);
 	if (!uuari->bitmap) {
@@ -595,13 +598,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	/*
 	 * clear all fast path uuars
 	 */
-	for (i = 0; i < req.total_num_uuars; i++) {
+	for (i = 0; i < gross_uuars; i++) {
 		uuarn = i & 3;
 		if (uuarn == 2 || uuarn == 3)
 			set_bit(i, uuari->bitmap);
 	}
 
-	uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
+	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
 	if (!uuari->count) {
 		err = -ENOMEM;
 		goto out_bitmap;
@@ -340,14 +340,57 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
 	return 1;
 }
 
+static int first_med_uuar(void)
+{
+	return 1;
+}
+
+static int next_uuar(int n)
+{
+	n++;
+
+	while (((n % 4) & 2))
+		n++;
+
+	return n;
+}
+
+static int num_med_uuar(struct mlx5_uuar_info *uuari)
+{
+	int n;
+
+	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
+		uuari->num_low_latency_uuars - 1;
+
+	return n >= 0 ? n : 0;
+}
+
+static int max_uuari(struct mlx5_uuar_info *uuari)
+{
+	return uuari->num_uars * 4;
+}
+
+static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+{
+	int med;
+	int i;
+	int t;
+
+	med = num_med_uuar(uuari);
+	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
+		t++;
+		if (t == med)
+			return next_uuar(i);
+	}
+
+	return 0;
+}
+
 static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
 {
-	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
-	int start_uuar;
 	int i;
 
-	start_uuar = nuuars - uuari->num_low_latency_uuars;
-	for (i = start_uuar; i < nuuars; i++) {
+	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
 		if (!test_bit(i, uuari->bitmap)) {
 			set_bit(i, uuari->bitmap);
 			uuari->count[i]++;
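The new iterators are easiest to see in action with a small standalone harness (editorial, userspace-only, not part of the patch):

#include <stdio.h>

/* Copy of the patch's iterator: advance n, then keep advancing while
 * the slot within its page (n % 4) has bit 1 set, i.e. skip the fast
 * path slots 2 and 3 of every UAR page.
 */
static int next_uuar(int n)
{
        n++;
        while (((n % 4) & 2))
                n++;
        return n;
}

int main(void)
{
        /* Prints 0 1 4 5 8 9 12 13: two usable slots per 4-slot page. */
        for (int i = 0; i <= 13; i = next_uuar(i))
                printf("%d ", i);
        printf("\n");
        return 0;
}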
@@ -360,19 +403,10 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
 
 static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
 {
-	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
-	int minidx = 1;
-	int uuarn;
-	int end;
+	int minidx = first_med_uuar();
 	int i;
 
-	end = nuuars - uuari->num_low_latency_uuars;
-
-	for (i = 1; i < end; i++) {
-		uuarn = i & 3;
-		if (uuarn == 2 || uuarn == 3)
-			continue;
-
+	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
 		if (uuari->count[i] < uuari->count[minidx])
 			minidx = i;
 	}
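A note on the rewritten alloc_med_class_uuar(): medium-class uuars are shared, so rather than taking the first free slot it walks the medium range with the new iterators and picks the slot with the smallest reference count, spreading QPs evenly across the shared registers.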
@@ -510,13 +544,18 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
 	if (uuarn < 0) {
 		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
-		mlx5_ib_dbg(dev, "reverting to high latency\n");
-		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+		mlx5_ib_dbg(dev, "reverting to medium latency\n");
+		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
 		if (uuarn < 0) {
-			mlx5_ib_dbg(dev, "uuar allocation failed\n");
-			return uuarn;
+			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+			mlx5_ib_dbg(dev, "reverting to high latency\n");
+			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+			if (uuarn < 0) {
+				mlx5_ib_warn(dev, "uuar allocation failed\n");
+				return uuarn;
+			}
 		}
 	}
 
 	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
 	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
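The net effect of this hunk is what the commit message promises: allocation now degrades high -> medium -> low instead of falling straight from high to low, and the final failure of the low-class fallback is promoted from mlx5_ib_dbg() to mlx5_ib_warn().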
@@ -106,7 +106,8 @@ enum {
 enum {
 	MLX5_BF_REGS_PER_PAGE		= 4,
 	MLX5_MAX_UAR_PAGES		= 1 << 8,
-	MLX5_MAX_UUARS		= MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE,
+	MLX5_NON_FP_BF_REGS_PER_PAGE	= 2,
+	MLX5_MAX_UUARS	= MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
 };
 
 enum {
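Since MLX5_MAX_UAR_PAGES is 1 << 8 = 256, redefining MLX5_MAX_UUARS in terms of the new constant halves it from 256 * 4 = 1024 to 256 * 2 = 512, matching the two general-purpose micro UARs actually available per page.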