mm/usercopy: Check kmap addresses properly

If you are copying to an address in the kmap region, you may not copy
across a page boundary, no matter what the size of the underlying
allocation. You can't kmap() a slab page because slab pages always
come from low memory.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-2-willy@infradead.org
parent a19944809f
commit 4e140f59d2

3 changed files with 21 additions and 6 deletions
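The whole fix rests on one piece of address arithmetic: a kmap address maps exactly one page, so a copy starting at ptr may extend at most to the last byte of that page, which is ptr | (PAGE_SIZE - 1). A minimal userspace sketch of that check (crosses_page() is a hypothetical helper and the 4 KiB PAGE_SIZE is illustrative; neither is kernel code):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative; arch-dependent in the kernel */

/* Model of the check added to check_heap_object(): the last byte of the
 * page containing ptr is ptr | (PAGE_SIZE - 1), and a copy of n bytes is
 * rejected when its last byte, ptr + n - 1, lies beyond that boundary. */
static int crosses_page(unsigned long ptr, unsigned long n)
{
	unsigned long page_end = ptr | (PAGE_SIZE - 1);

	return ptr + n - 1 > page_end;
}

int main(void)
{
	assert(!crosses_page(0x1000, PAGE_SIZE));	/* a whole page is fine */
	assert(!crosses_page(0x1ff0, 16));		/* ends exactly on the boundary */
	assert(crosses_page(0x1ff0, 17));		/* spills onto the next page */
	printf("page-boundary checks behave as expected\n");
	return 0;
}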
				
			
		| 
						 | 
					@ -26,6 +26,7 @@
 | 
				
			||||||
#include <asm/tlbflush.h>
 | 
					#include <asm/tlbflush.h>
 | 
				
			||||||
#include <asm/paravirt.h>
 | 
					#include <asm/paravirt.h>
 | 
				
			||||||
#include <asm/fixmap.h>
 | 
					#include <asm/fixmap.h>
 | 
				
			||||||
 | 
					#include <asm/pgtable_areas.h>
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/* declarations for highmem.c */
 | 
					/* declarations for highmem.c */
 | 
				
			||||||
extern unsigned long highstart_pfn, highend_pfn;
 | 
					extern unsigned long highstart_pfn, highend_pfn;
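This include hunk carries no logic of its own; reading the rest of the diff, it plausibly ensures that PKMAP_BASE, which asm/pgtable_areas.h provides and this header's PKMAP_ADDR() macro expands to, is in scope once the new is_kmap_addr() helper below starts using those macros. That reading is an inference from the diff, not something the commit message states.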
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -149,6 +149,11 @@ static inline void totalhigh_pages_add(long count)
 	atomic_long_add(count, &_totalhigh_pages);
 }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
+}
 #else /* CONFIG_HIGHMEM */
 
 static inline struct page *kmap_to_page(void *addr)
@@ -234,6 +239,11 @@ static inline void __kunmap_atomic(void *addr)
 static inline unsigned int nr_free_highpages(void) { return 0; }
 static inline unsigned long totalhigh_pages(void) { return 0UL; }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	return false;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
 /*
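The CONFIG_HIGHMEM=y helper is a pure range test: a pointer is a kmap address iff it falls inside the persistent-kmap window [PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)). A standalone model of the predicate, with made-up constants standing in for the arch-specific ones (PAGE_SHIFT, PKMAP_BASE and LAST_PKMAP below are illustrative):

#include <stdbool.h>

#define PAGE_SHIFT	12			/* illustrative */
#define PKMAP_BASE	0xff800000UL		/* illustrative */
#define LAST_PKMAP	1024			/* illustrative */
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

/* Mirrors the CONFIG_HIGHMEM=y definition above: true iff x lies inside
 * the persistent-kmap virtual address window. */
static bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
}

With CONFIG_HIGHMEM=n there is no kmap window at all, hence the stub that always returns false; the compiler can then discard the new kmap branch in check_heap_object() as dead code.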
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -229,12 +229,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (!virt_addr_valid(ptr))
 		return;
 
-	/*
-	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
-	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_slab().
-	 */
-	folio = page_folio(kmap_to_page((void *)ptr));
+	if (is_kmap_addr(ptr)) {
+		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
+
+		if ((unsigned long)ptr + n - 1 > page_end)
+			usercopy_abort("kmap", NULL, to_user,
+				       offset_in_page(ptr), n);
+		return;
+	}
+
+	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
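Note the second half of this hunk: once kmap addresses are handled (and returned from) early, every pointer that reaches the folio lookup is an ordinary lowmem address, so the old highmem-aware page_folio(kmap_to_page((void *)ptr)) lookup, which the deleted comment described as "effectively a highmem-aware virt_to_slab()", can be replaced by a plain virt_to_folio(ptr).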