drm/ttm: Add helpers for shrinking

Add a number of helpers for shrinking that access core TTM and core MM
functionality in a way that makes them unsuitable for driver open-coding.

v11:
- New patch (split off from previous) and additional helpers.
v13:
- Adapt to ttm_backup interface change.
- Take resource off LRU when backed up.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/intel-xe/20250305092220.123405-6-thomas.hellstrom@linux.intel.com
commit 70d645deac
parent f3bcfd04a5
4 changed files with 158 additions and 1 deletion
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,7 +28,7 @@
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/swap.h>
#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
@@ -1052,3 +1052,108 @@ struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs
	return bo ? bo : ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);

/**
 * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
 * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
 * @bo: The buffer object.
 * @flags: Flags governing the shrinking behaviour.
 *
 * The function uses the ttm_tt_backup() functionality to back up or
 * purge a struct ttm_tt. If the bo is not in system memory, it is
 * first moved there.
 *
 * Return: The number of pages shrunk or purged, or a
 * negative error code on failure.
 */
long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags)
{
	static const struct ttm_place sys_placement_flags = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0,
	};
	static struct ttm_placement sys_placement = {
		.num_placement = 1,
		.placement = &sys_placement_flags,
	};
	struct ttm_tt *tt = bo->ttm;
	long lret;

	dma_resv_assert_held(bo->base.resv);

	if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
		int ret = ttm_bo_validate(bo, &sys_placement, ctx);

		/* Consider -ENOMEM and -ENOSPC non-fatal. */
		if (ret) {
			if (ret == -ENOMEM || ret == -ENOSPC)
				ret = -EBUSY;
			return ret;
		}
	}

	ttm_bo_unmap_virtual(bo);
	lret = ttm_bo_wait_ctx(bo, ctx);
	if (lret < 0)
		return lret;

	if (bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
			     {.purge = flags.purge,
			      .writeback = flags.writeback});

	if (lret <= 0 && bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	if (lret < 0 && lret != -EINTR)
		return -EBUSY;

	return lret;
}
EXPORT_SYMBOL(ttm_bo_shrink);

/**
 * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
 * @bo: The candidate for shrinking.
 * @ctx: The struct ttm_operation_ctx governing the shrinking.
 *
 * Check whether the object, given the information available to TTM,
 * is suitable for shrinking. This function can and should be used
 * before attempting to shrink an object.
 *
 * Return: true if suitable, false if not.
 */
bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
		(!ctx->no_wait_gpu ||
		 dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
}
EXPORT_SYMBOL(ttm_bo_shrink_suitable);

/**
 * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
 * during shrinking
 *
 * In some situations, like direct reclaim, waiting (in particular for the
 * GPU) should be avoided since it may stall a system that could otherwise
 * make progress by shrinking something else that is less time-consuming.
 *
 * Return: true if GPU waiting should be avoided, false if not.
 */
bool ttm_bo_shrink_avoid_wait(void)
{
	return !current_is_kswapd();
}
EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
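
Note: taken together, these three helpers are intended to compose in a driver shrinker's scan path. The following is a minimal sketch of that composition, under stated assumptions: my_driver_shrink_one(), the trylock policy and the flag choices are illustrative, not part of this patch.

#include <drm/ttm/ttm_bo.h>
#include <linux/dma-resv.h>

/* Hypothetical per-object scan step for a driver shrinker. */
static long my_driver_shrink_one(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		/* In direct reclaim, avoid stalling on GPU waits. */
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	long lret = 0;

	/* ttm_bo_shrink() requires the object's reservation to be held. */
	if (!dma_resv_trylock(bo->base.resv))
		return -EBUSY;

	if (ttm_bo_shrink_suitable(bo, &ctx))
		lret = ttm_bo_shrink(&ctx, bo, (struct ttm_bo_shrink_flags) {
					     .writeback = true,
					     .allow_move = true,
				     });

	dma_resv_unlock(bo->base.resv);

	/* Number of pages shrunk, or a negative error code. */
	return lret;
}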

drivers/gpu/drm/ttm/ttm_tt.c
@@ -531,3 +531,32 @@ unsigned long ttm_tt_pages_limit(void)
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);

/**
 * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
 * @tt: The ttm_tt for which to allocate and assign a backup structure.
 *
 * Assign a backup structure to be used for tt backup. This should
 * typically be done at bo creation, to avoid allocations at shrinking
 * time.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
	struct ttm_backup *backup =
		ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);

	if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
		return -EINVAL;

	if (IS_ERR(backup))
		return PTR_ERR(backup);

	if (tt->backup)
		ttm_backup_fini(tt->backup);

	tt->backup = backup;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_setup_backup);
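
Note: since the doc comment recommends allocating the backup at bo creation, a minimal sketch of a hypothetical driver ->ttm_tt_create() hook follows. The flag handling is an assumption derived from the WARN_ON above: ttm_tt_setup_backup() insists on TTM_TT_FLAG_EXTERNAL_MAPPABLE, and pairing it with TTM_TT_FLAG_EXTERNAL (which makes page population driver-managed) follows TTM's documented flag rules.

#include <drm/ttm/ttm_tt.h>
#include <linux/slab.h>

/* Hypothetical driver hook: create a shrinkable ttm_tt with a backup. */
static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       u32 page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* Assumed flag pairing; population of the tt is not shown here. */
	page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0))
		goto err_free;

	/* Allocate the backup up front to avoid allocating under reclaim. */
	if (ttm_tt_setup_backup(tt))
		goto err_fini;

	return tt;

err_fini:
	ttm_tt_fini(tt);
err_free:
	kfree(tt);
	return NULL;
}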

include/drm/ttm/ttm_bo.h
@@ -225,6 +225,27 @@ struct ttm_lru_walk {
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target);

/**
 * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
 * @purge: Purge the content rather than backing it up.
 * @writeback: Attempt to immediately write content to swap space.
 * @allow_move: Allow moving to system before shrinking. This is typically
 * not desired for zombie or ghost objects (a zombie object being an
 * object with a zero gem object refcount).
 */
struct ttm_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
	u32 allow_move : 1;
};

long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags);

bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);

bool ttm_bo_shrink_avoid_wait(void);
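
Note: as an illustration (not from this header), the two typical flag combinations might look as follows; a regular shrink backs content up to swap, while a purge discards it outright.

/* Back content up to swap, migrating to system memory first if needed. */
static const struct ttm_bo_shrink_flags my_backup_flags = {
	.writeback = true,
	.allow_move = true,
};

/*
 * Discard content instead of backing it up; .allow_move is left clear
 * since ghost/zombie objects should not be moved.
 */
static const struct ttm_bo_shrink_flags my_purge_flags = {
	.purge = true,
};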

/**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *

include/drm/ttm/ttm_tt.h
@@ -298,6 +298,8 @@ long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
		   const struct ttm_operation_ctx *ctx);

int ttm_tt_setup_backup(struct ttm_tt *tt);

#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>