zsmalloc: move huge compressed obj from page to zspage

The huge-object flag describes the zspage as a whole, not an individual
page, so move it from the page flags into struct zspage.

Link: https://lkml.kernel.org/r/20211115185909.3949505-6-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
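For readers outside the kernel tree: before this patch the huge-object marker piggybacked on the PG_owner_priv_1 page flag (via the PageHugeObject() helpers removed below), so callers tested a struct page; after it, the marker is a one-bit field in struct zspage, and callers first resolve the owning zspage with get_zspage(). A minimal compilable userspace sketch of the new layout — the struct is heavily abbreviated, and only the field and helper names are taken from the patch:

#include <stdbool.h>
#include <stdio.h>

#define HUGE_BITS	1

/* Abbreviated stand-in for the kernel's struct zspage. */
struct zspage {
	unsigned int huge:HUGE_BITS;	/* the bit this patch adds */
	/* ... fullness, class, isolated, magic, freeobj, ... */
};

/* Same bodies as the helpers the patch introduces. */
static void SetZsHugePage(struct zspage *zspage)
{
	zspage->huge = 1;
}

static bool ZsHugePage(struct zspage *zspage)
{
	return zspage->huge;
}

int main(void)
{
	struct zspage zspage = { 0 };

	SetZsHugePage(&zspage);	/* done once, in create_page_chain() */
	printf("huge? %d\n", ZsHugePage(&zspage));	/* prints: huge? 1 */
	return 0;
}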
parent 3ae92ac23b
commit a41ec880aa

1 changed file with 26 additions and 24 deletions
mm/zsmalloc.c

@@ -121,6 +121,7 @@
 #define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
 #define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
 
+#define HUGE_BITS	1
 #define FULLNESS_BITS	2
 #define CLASS_BITS	8
 #define ISOLATED_BITS	3
@@ -213,22 +214,6 @@ struct size_class {
 	struct zs_size_stat stats;
 };
 
-/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
-static void SetPageHugeObject(struct page *page)
-{
-	SetPageOwnerPriv1(page);
-}
-
-static void ClearPageHugeObject(struct page *page)
-{
-	ClearPageOwnerPriv1(page);
-}
-
-static int PageHugeObject(struct page *page)
-{
-	return PageOwnerPriv1(page);
-}
-
 /*
  * Placed within free objects to form a singly linked list.
  * For every zspage, zspage->freeobj gives head of this list.
@@ -278,6 +263,7 @@ struct zs_pool {
 
 struct zspage {
 	struct {
+		unsigned int huge:HUGE_BITS;
 		unsigned int fullness:FULLNESS_BITS;
 		unsigned int class:CLASS_BITS + 1;
 		unsigned int isolated:ISOLATED_BITS;
@@ -298,6 +284,17 @@ struct mapping_area {
 	enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+static void SetZsHugePage(struct zspage *zspage)
+{
+	zspage->huge = 1;
+}
+
+static bool ZsHugePage(struct zspage *zspage)
+{
+	return zspage->huge;
+}
+
 #ifdef CONFIG_COMPACTION
 static int zs_register_migration(struct zs_pool *pool);
 static void zs_unregister_migration(struct zs_pool *pool);
@@ -830,7 +827,9 @@ static struct zspage *get_zspage(struct page *page)
 
 static struct page *get_next_page(struct page *page)
 {
-	if (unlikely(PageHugeObject(page)))
+	struct zspage *zspage = get_zspage(page);
+
+	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
 
 	return (struct page *)page->index;
@@ -880,8 +879,9 @@ static unsigned long handle_to_obj(unsigned long handle)
 static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
 {
 	unsigned long handle;
+	struct zspage *zspage = get_zspage(page);
 
-	if (unlikely(PageHugeObject(page))) {
+	if (unlikely(ZsHugePage(zspage))) {
 		VM_BUG_ON_PAGE(!is_first_page(page), page);
 		handle = page->index;
 	} else
@@ -920,7 +920,6 @@ static void reset_page(struct page *page)
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
 	page_mapcount_reset(page);
-	ClearPageHugeObject(page);
 	page->index = 0;
 }
 
@@ -1062,7 +1061,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
 			SetPagePrivate(page);
 			if (unlikely(class->objs_per_zspage == 1 &&
 					class->pages_per_zspage == 1))
-				SetPageHugeObject(page);
+				SetZsHugePage(zspage);
 		} else {
 			prev_page->index = (unsigned long)page;
 		}
@@ -1307,7 +1306,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 
 	ret = __zs_map_object(area, pages, off, class->size);
 out:
-	if (likely(!PageHugeObject(page)))
+	if (likely(!ZsHugePage(zspage)))
 		ret += ZS_HANDLE_SIZE;
 
 	return ret;
@@ -1395,7 +1394,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
 	vaddr = kmap_atomic(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
-	if (likely(!PageHugeObject(m_page)))
+	if (likely(!ZsHugePage(zspage)))
 		/* record handle in the header of allocated chunk */
 		link->handle = handle;
 	else
@@ -1496,7 +1495,10 @@ static void obj_free(int class_size, unsigned long obj)
 
 	/* Insert this object in containing zspage's freelist */
 	link = (struct link_free *)(vaddr + f_offset);
-	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
+	if (likely(!ZsHugePage(zspage)))
+		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
+	else
+		f_page->index = 0;
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
@@ -1867,7 +1869,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 
 	create_page_chain(class, zspage, pages);
 	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
-	if (unlikely(PageHugeObject(oldpage)))
+	if (unlikely(ZsHugePage(zspage)))
 		newpage->index = oldpage->index;
 	__SetPageMovable(newpage, page_mapping(oldpage));
 }
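One hunk is behavioral rather than a straight rename: obj_free(). A huge zspage holds exactly one object and keeps its handle in the first page's index field (see the obj_allocated() hunk above) rather than in a link_free header inside the chunk, so on free there is no freelist link to write; the patch clears page->index instead. A recap of that branch — same code as the hunk, with comments that are added here and not part of the patch:

	if (likely(!ZsHugePage(zspage)))
		/* normal zspage: push the object onto the freelist */
		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	else
		/* huge zspage: the handle lived in page->index; clear it */
		f_page->index = 0;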