drm/vmwgfx: Convert to new IDA API

Reorder allocation to avoid an awkward lock/unlock/lock sequence.
Simpler code due to being able to use ida_alloc_max(), even if we
can't eliminate the driver's spinlock.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 485258b448
commit 4eb085e42f

1 changed file with 12 additions and 29 deletions
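For context, a minimal, self-contained sketch of the allocation pattern the new code follows: ida_alloc_max() can sleep under GFP_KERNEL, so the ID is allocated before the driver's spinlock is taken, and ida_free(), which needs no external locking, releases the ID on the failure path. The example_man structure and example_get_id() helper below are hypothetical and only illustrate the API usage; note that the driver's own nospace path in the diff returns 0 (leaving mem->mm_node NULL), whereas the sketch returns -ENOSPC to stay self-contained.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

/* Hypothetical manager used only to illustrate the new IDA calls. */
struct example_man {
	struct ida ida;			/* internally locked; no external lock needed */
	spinlock_t lock;		/* protects the page accounting below */
	unsigned long used_pages;
	unsigned long max_pages;
};

static int example_get_id(struct example_man *man, unsigned long num_pages,
			  unsigned int max_ids, int *out_id)
{
	int id;

	/* May sleep with GFP_KERNEL, so allocate before taking the spinlock. */
	id = ida_alloc_max(&man->ida, max_ids - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	spin_lock(&man->lock);
	man->used_pages += num_pages;
	if (man->used_pages > man->max_pages) {
		man->used_pages -= num_pages;
		spin_unlock(&man->lock);
		/* ida_free() needs no external lock, unlike the old ida_remove(). */
		ida_free(&man->ida, id);
		return -ENOSPC;
	}
	spin_unlock(&man->lock);

	*out_id = id;
	return 0;
}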
				
			
@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman =
 		(struct vmwgfx_gmrid_man *)man->priv;
-	int ret = 0;
 	int id;
 
 	mem->mm_node = NULL;
 
+	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
 		gman->used_gmr_pages += bo->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
-			goto out_err_locked;
+			goto nospace;
 	}
 
-	do {
-		spin_unlock(&gman->lock);
-		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
-			ret = -ENOMEM;
-			goto out_err;
-		}
-		spin_lock(&gman->lock);
-
-		ret = ida_get_new(&gman->gmr_ida, &id);
-		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
-			ida_remove(&gman->gmr_ida, id);
-			ret = 0;
-			goto out_err_locked;
-		}
-	} while (ret == -EAGAIN);
-
-	if (likely(ret == 0)) {
-		mem->mm_node = gman;
-		mem->start = id;
-		mem->num_pages = bo->num_pages;
-	} else
-		goto out_err_locked;
+	mem->mm_node = gman;
+	mem->start = id;
+	mem->num_pages = bo->num_pages;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
-out_err:
-	spin_lock(&gman->lock);
-out_err_locked:
+nospace:
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
-	return ret;
+	ida_free(&gman->gmr_ida, id);
+	return 0;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 		(struct vmwgfx_gmrid_man *)man->priv;
 
 	if (mem->mm_node) {
+		ida_free(&gman->gmr_ida, mem->start);
 		spin_lock(&gman->lock);
-		ida_remove(&gman->gmr_ida, mem->start);
 		gman->used_gmr_pages -= mem->num_pages;
 		spin_unlock(&gman->lock);
 		mem->mm_node = NULL;