	hugetlb: fix infinite loop in get_futex_key() when backed by huge pages
If a futex key happens to be located within a huge page mapped MAP_PRIVATE,
get_futex_key() can go into an infinite loop waiting for a page->mapping that
will never exist.

See https://bugzilla.redhat.com/show_bug.cgi?id=552257 for more details about
the problem.

This patch makes page->mapping a poisoned value that includes
PAGE_MAPPING_ANON mapped MAP_PRIVATE.  This is enough for futex to continue
but because of PAGE_MAPPING_ANON, the poisoned value is not dereferenced or
used by futex.  No other part of the VM should be dereferencing the
page->mapping of a hugetlbfs page as its page cache is not on the LRU.

This patch fixes the problem with the test case described in the bugzilla.

[akpm@linux-foundation.org: mel cant spel]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Darren Hart <darren@dvhart.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
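As background, a minimal user-space sketch of the failure scenario follows. It is not the test case from the bugzilla: it assumes MAP_HUGETLB support (2.6.32 or later), pre-reserved huge pages, and a 2MB huge page size, and it waits on a shared futex (no FUTEX_PRIVATE_FLAG) located inside a MAP_PRIVATE huge page so that get_futex_key() must examine page->mapping. On an unpatched kernel the FUTEX_WAIT call never returns; with this patch it fails promptly with EAGAIN because *uaddr does not hold the expected value.

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* assumed x86 value for older headers */
#endif

int main(void)
{
	const size_t len = 2UL * 1024 * 1024;	/* assumes 2MB huge pages */
	uint32_t *uaddr;
	long ret;

	/* Private (copy-on-write) huge page mapping, as described above. */
	uaddr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (uaddr == MAP_FAILED) {
		perror("mmap (are huge pages reserved?)");
		return 1;
	}
	*uaddr = 1;	/* fault the huge page in */

	/*
	 * A shared futex op (no FUTEX_PRIVATE_FLAG) makes get_futex_key()
	 * look at page->mapping of the huge page.  Unpatched kernels loop
	 * there forever; patched kernels return -1/EAGAIN since *uaddr != 0.
	 */
	ret = syscall(SYS_futex, uaddr, FUTEX_WAIT, 0, NULL, NULL, 0);
	printf("futex(FUTEX_WAIT) returned %ld (errno %d)\n", ret, errno);
	return 0;
}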
This commit is contained in:

parent 9a6a1ecd9e
commit 23be7468e8

2 changed files with 13 additions and 1 deletion
include/linux/poison.h
@@ -48,6 +48,15 @@
 #define POISON_FREE	0x6b	/* for use-after-free poisoning */
 #define	POISON_END	0xa5	/* end-byte of poisoning */

+/********** mm/hugetlb.c **********/
+/*
+ * Private mappings of hugetlb pages use this poisoned value for
+ * page->mapping. The core VM should not be doing anything with this mapping
+ * but futex requires the existence of some page->mapping value even though it
+ * is unused if PAGE_MAPPING_ANON is set.
+ */
+#define HUGETLB_POISON	((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
+
 /********** arch/$ARCH/mm/init.c **********/
 #define POISON_FREE_INITMEM	0xcc

mm/hugetlb.c
@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)

 	mapping = (struct address_space *) page_private(page);
 	set_page_private(page, 0);
+	page->mapping = NULL;
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);

@@ -2447,8 +2448,10 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-		} else
+		} else {
 			lock_page(page);
+			page->mapping = HUGETLB_POISON;
+		}
 	}

 	/*
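The poisoned value is safe to leave in page->mapping because PAGE_MAPPING_ANON is the tag bit the VM keeps in the low bits of page->mapping to mark anonymous pages, so the PageAnon() test succeeds and futex builds a private key without ever dereferencing the pointer. The user-space sketch below only illustrates that tagging; the constants are copied by hand and POISON_POINTER_DELTA is assumed to be 0 (CONFIG_ILLEGAL_POINTER_VALUE not set).

#include <stdio.h>

#define PAGE_MAPPING_ANON	1	/* low tag bit, as in include/linux/mm.h */
#define POISON_POINTER_DELTA	0	/* assumption: CONFIG_ILLEGAL_POINTER_VALUE unset */
#define HUGETLB_POISON \
	((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))

/* The same test PageAnon() performs on page->mapping. */
static int mapping_is_anon(void *mapping)
{
	return ((unsigned long)mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
	void *mapping = HUGETLB_POISON;

	printf("HUGETLB_POISON = %p, PageAnon-style test: %d\n",
	       mapping, mapping_is_anon(mapping));
	return 0;
}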