	mm: zswap: support exclusive loads
Commit 71024cb4a0 ("frontswap: remove frontswap_tmem_exclusive_gets")
removed support for exclusive loads from frontswap as it was not used. 
Bring back exclusive loads support to frontswap by adding an "exclusive"
output parameter to frontswap_ops->load.
On the zswap side, add a module parameter to enable/disable exclusive
loads, and a config option to control the boot default value.  Refactor
zswap entry invalidation in zswap_frontswap_invalidate_page() into
zswap_invalidate_entry() to reuse it in zswap_frontswap_load() if
exclusive loads are enabled.
With exclusive loads, we avoid having two copies of the same page in
memory (compressed & uncompressed) after faulting it in from zswap.  On
the other hand, if the page is to be reclaimed again without being
dirtied, it will be re-compressed.  Compression is not usually slow, and a
page that was just faulted in is less likely to be reclaimed again soon.
Link: https://lkml.kernel.org/r/20230607195143.1473802-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Suggested-by: Yu Zhao <yuzhao@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
			
			
parent 32b6a4a174
commit b9c91c4341

4 changed files with 45 additions and 11 deletions
include/linux/frontswap.h | 2 +-

@@ -10,7 +10,7 @@
 struct frontswap_ops {
	void (*init)(unsigned); /* this swap type was just swapon'ed */
	int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
-	int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+	int (*load)(unsigned, pgoff_t, struct page *, bool *); /* load a page */
	void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
	void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
 };
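
The out-parameter is opt-in for backends: a load hook that never touches it keeps the old non-exclusive semantics. A minimal sketch of how a hypothetical backend would report an exclusive load (example_load is illustrative, not part of this commit):

	/* Hypothetical backend hook using the new load signature. */
	static int example_load(unsigned type, pgoff_t offset,
				struct page *page, bool *exclusive)
	{
		/* ... look up the stored copy and decompress it into @page ... */

		/*
		 * Signal that the backend dropped its copy; __frontswap_load()
		 * will then mark the page dirty and clear the frontswap map
		 * bit for this offset.
		 */
		*exclusive = true;
		return 0;
	}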
mm/Kconfig | 16 ++++++++++++++++

@@ -46,6 +46,22 @@ config ZSWAP_DEFAULT_ON
	  The selection made here can be overridden by using the kernel
	  command line 'zswap.enabled=' option.
 
+config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
+	bool "Invalidate zswap entries when pages are loaded"
+	depends on ZSWAP
+	help
+	  If selected, exclusive loads for zswap will be enabled at boot,
+	  otherwise it will be disabled.
+
+	  If exclusive loads are enabled, when a page is loaded from zswap,
+	  the zswap entry is invalidated at once, as opposed to leaving it
+	  in zswap until the swap entry is freed.
+
+	  This avoids having two copies of the same page in memory
+	  (compressed and uncompressed) after faulting in a page from zswap.
+	  The cost is that if the page was never dirtied and needs to be
+	  swapped out again, it will be re-compressed.
+
 choice
	prompt "Default compressor"
	depends on ZSWAP
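
Note that the Kconfig symbol only chooses the boot default; as the mm/zswap.c hunk below shows, it feeds the exclusive_loads module parameter through IS_ENABLED(), so the behavior stays switchable after the kernel is built.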
mm/frontswap.c | 10 ++++++++--

@@ -206,6 +206,7 @@ int __frontswap_load(struct page *page)
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);
+	bool exclusive = false;
 
	VM_BUG_ON(!frontswap_ops);
	VM_BUG_ON(!PageLocked(page));
@@ -215,9 +216,14 @@ int __frontswap_load(struct page *page)
		return -1;
 
	/* Try loading from each implementation, until one succeeds. */
-	ret = frontswap_ops->load(type, offset, page);
-	if (ret == 0)
+	ret = frontswap_ops->load(type, offset, page, &exclusive);
+	if (ret == 0) {
		inc_frontswap_loads();
+		if (exclusive) {
+			SetPageDirty(page);
+			__frontswap_clear(sis, offset);
+		}
+	}
	return ret;
 }
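
The ordering here matters: once the backend signals an exclusive load, the freshly faulted-in page is the only remaining copy, so SetPageDirty() guarantees a later reclaim writes the page out again rather than assuming a backing copy still exists, and __frontswap_clear() clears the offset's bit in the frontswap map so the backend is no longer expected to hold it.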
mm/zswap.c | 28 ++++++++++++++++++++--------
@@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true;
 module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);
 
+static bool zswap_exclusive_loads_enabled = IS_ENABLED(
+		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
+module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
+
 /*********************************
 * data structures
 **********************************/
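
Since the parameter is registered with mode 0644, the default selected by CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON can also be overridden at runtime by writing Y or N to /sys/module/zswap/parameters/exclusive_loads.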
@@ -1340,12 +1344,22 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
	goto reject;
 }
 
+static void zswap_invalidate_entry(struct zswap_tree *tree,
+				   struct zswap_entry *entry)
+{
+	/* remove from rbtree */
+	zswap_rb_erase(&tree->rbroot, entry);
+
+	/* drop the initial reference from entry creation */
+	zswap_entry_put(tree, entry);
+}
+
 /*
  * returns 0 if the page was successfully decompressed
  * return -1 on entry not found or error
 */
 static int zswap_frontswap_load(unsigned type, pgoff_t offset,
-				struct page *page)
+				struct page *page, bool *exclusive)
 {
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
@@ -1415,6 +1429,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 freeentry:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
+	if (!ret && zswap_exclusive_loads_enabled) {
+		zswap_invalidate_entry(tree, entry);
+		*exclusive = true;
+	}
	spin_unlock(&tree->lock);
 
	return ret;
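
A subtlety in this hunk: zswap_entry_put() drops only the reference taken when the entry was looked up, while zswap_invalidate_entry() also erases the entry from the rbtree and drops the initial creation reference. Doing both under tree->lock means no concurrent lookup can find the entry between a successful exclusive load and its invalidation.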
@@ -1434,13 +1452,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
		spin_unlock(&tree->lock);
		return;
	}
-
-	/* remove from rbtree */
-	zswap_rb_erase(&tree->rbroot, entry);
-
-	/* drop the initial reference from entry creation */
-	zswap_entry_put(tree, entry);
-
+	zswap_invalidate_entry(tree, entry);
	spin_unlock(&tree->lock);
 }