	mm: introduce page_size()
Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate places
to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
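In short, the helper centralizes an idiom that is open-coded all over the
tree. A minimal sketch of the conversion pattern; the helper body is taken
from the patch itself, while dcache_flush_range() below is an illustrative
stand-in for real callers such as __cpuc_flush_dcache_area(), not a kernel
API:

	/* The helper this patch adds to include/linux/mm.h: the size in
	 * bytes of a page, honouring compound (huge) pages. */
	static inline unsigned long page_size(struct page *page)
	{
		return PAGE_SIZE << compound_order(page);
	}

	/* Hypothetical caller, before: the shift is open-coded. */
	static void flush_one_page_before(struct page *page)
	{
		dcache_flush_range(page_address(page),
				   PAGE_SIZE << compound_order(page));
	}

	/* Same caller, after: the intent is explicit and harder to mistype. */
	static void flush_one_page_after(struct page *page)
	{
		dcache_flush_range(page_address(page), page_size(page));
	}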
parent 1f18b29669
commit a50b854e07

17 changed files with 35 additions and 38 deletions
arch/arm/mm/flush.c
@@ -204,8 +204,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 							   __GFP_NORETRY,
 							   order);
 				if (page)
-					pg_size <<=
-						compound_order(page);
+					pg_size <<= order;
 			}
 			if (!page) {
 				page = alloc_page(gfp);
drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
fs/io_uring.c
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;
mm/kasan/common.c
@@ -338,8 +338,7 @@ void kasan_poison_slab(struct page *page)
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -542,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -578,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 /**
mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1372,8 +1371,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;
 
 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	start = page_address(page);
 
-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);
 
 	shuffle = shuffle_freelist(s, page);
 
@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;