	mm: Introduce mm_struct.has_pinned
(Commit message majorly collected from Jason Gunthorpe)

Reduce the chance of false positives from page_maybe_dma_pinned() by
keeping track of whether the mm_struct has ever been used with
pin_user_pages().  This allows cases that might drive up the page
ref_count to avoid any penalty from handling dma_pinned pages.

Future work is planned to provide a more sophisticated solution, likely
by turning this into a real counter.  For now, make it an atomic_t but
use it as a boolean for simplicity.

Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

    parent a1bffa4874
    commit 008cfe4418

3 changed files with 17 additions and 0 deletions
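To make the intent concrete, here is a hedged sketch (not part of this
commit) of how a consumer of the flag could avoid the penalty the message
describes. page_maybe_dma_pinned() and mm->has_pinned are real names from
the kernel and this patch; the wrapper function below is purely
illustrative.

#include <linux/mm.h>
#include <linux/mm_types.h>

/*
 * Illustrative wrapper: if this mm has never called pin_user_pages(),
 * none of its pages can be DMA-pinned, so the (false-positive-prone)
 * ref_count heuristic in page_maybe_dma_pinned() can be skipped.
 */
static inline bool mm_page_maybe_dma_pinned(struct mm_struct *mm,
					     struct page *page)
{
	if (!atomic_read(&mm->has_pinned))
		return false;

	return page_maybe_dma_pinned(page);
}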
				
			
include/linux/mm_types.h (10 additions)

@@ -436,6 +436,16 @@ struct mm_struct {
 	 */
 	atomic_t mm_count;
 
+	/**
+	 * @has_pinned: Whether this mm has pinned any pages.  This can
+	 * be either replaced in the future by @pinned_vm when it
+	 * becomes stable, or grow into a counter on its own. We're
+	 * aggresive on this bit now - even if the pinned pages were
+	 * unpinned later on, we'll still keep this bit set for the
+	 * lifecycle of this mm just for simplicity.
+	 */
+	atomic_t has_pinned;
+
 #ifdef CONFIG_MMU
 	atomic_long_t pgtables_bytes;	/* PTE page table pages */
 #endif
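The kernel-doc above describes a sticky boolean: atomic_t is used only for
a cheap, tear-free store and load, the value is never incremented, and it
is never cleared once set. A minimal sketch of that access pattern follows
(the function names are illustrative, not from the patch):

/* Set once, typically from a pinning path; concurrent setters are
 * harmless because every writer stores the same value 1. */
static void mark_mm_has_pinned(struct mm_struct *mm)
{
	atomic_set(&mm->has_pinned, 1);
}

/* Remains true for the rest of the mm's lifetime, even after all
 * pinned pages have been unpinned again. */
static bool mm_ever_pinned(struct mm_struct *mm)
{
	return atomic_read(&mm->has_pinned) != 0;
}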
kernel/fork.c (1 addition)

@@ -1011,6 +1011,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_pgtables_bytes_init(mm);
 	mm->map_count = 0;
 	mm->locked_vm = 0;
+	atomic_set(&mm->has_pinned, 0);
 	atomic64_set(&mm->pinned_vm, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
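A side effect of initializing the flag in mm_init(), not spelled out in the
diff, so treat this as a hedged reading of the surrounding fork code: in
kernels of this era dup_mm() memcpy()s the parent mm_struct and then calls
mm_init(), so a forked child starts with has_pinned cleared rather than
inheriting the parent's pinning history. A sketch of that ordering, with
error handling and unrelated setup elided:

/* Hedged sketch of the dup_mm() ordering in kernel/fork.c; names other
 * than memcpy(), allocate_mm() and mm_init() are illustrative. */
static struct mm_struct *dup_mm_sketch(struct task_struct *tsk,
				       struct mm_struct *oldmm)
{
	struct mm_struct *mm = allocate_mm();

	if (!mm)
		return NULL;

	memcpy(mm, oldmm, sizeof(*mm));		/* copies has_pinned too... */

	if (!mm_init(mm, tsk, mm->user_ns))	/* ...then resets it to 0 */
		return NULL;

	return mm;
}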
							
								
								
									
mm/gup.c (6 additions)

@@ -1255,6 +1255,9 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 		BUG_ON(*locked != 1);
 	}
 
+	if (flags & FOLL_PIN)
+		atomic_set(&current->mm->has_pinned, 1);
+
 	/*
 	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
 	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
@@ -2660,6 +2663,9 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
 				       FOLL_FAST_ONLY)))
 		return -EINVAL;
 
+	if (gup_flags & FOLL_PIN)
+		atomic_set(&current->mm->has_pinned, 1);
+
 	if (!(gup_flags & FOLL_FAST_ONLY))
 		might_lock_read(&current->mm->mmap_lock);
 