forked from mirrors/linux
		
	mm: allow !GFP_KERNEL allocations for kvmalloc
Support for GFP_NO{FS,IO} and __GFP_NOFAIL has been implemented by the
previous patches, so we can now allow these flags for kvmalloc as well.
This will allow some external users to simplify or completely remove
their helpers.
The GFP_NOWAIT semantic has never been supported, but this was not
explicitly documented, so add a note about that.
ceph_kvmalloc is the first helper to be dropped and changed to kvmalloc.
Link: https://lkml.kernel.org/r/20211122153233.9924-5-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
This commit is contained in:
		
							parent
							
								
									30d3f01191
								
							
						
					
					
						commit
						a421ef3030
					
				
					 8 changed files with 15 additions and 50 deletions
				
			
		|  | @ -295,7 +295,6 @@ extern bool libceph_compatible(void *data); | ||||||
| 
 | 
 | ||||||
| extern const char *ceph_msg_type_name(int type); | extern const char *ceph_msg_type_name(int type); | ||||||
| extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | ||||||
| extern void *ceph_kvmalloc(size_t size, gfp_t flags); |  | ||||||
| 
 | 
 | ||||||
| struct fs_parameter; | struct fs_parameter; | ||||||
| struct fc_log; | struct fc_log; | ||||||
|  |  | ||||||
							
								
								
									
										15
									
								
								mm/util.c
									
									
									
									
									
								
							
							
						
						
									
										15
									
								
								mm/util.c
									
									
									
									
									
								
							|  | @ -549,13 +549,10 @@ EXPORT_SYMBOL(vm_mmap); | ||||||
|  * Uses kmalloc to get the memory but if the allocation fails then falls back |  * Uses kmalloc to get the memory but if the allocation fails then falls back | ||||||
|  * to the vmalloc allocator. Use kvfree for freeing the memory. |  * to the vmalloc allocator. Use kvfree for freeing the memory. | ||||||
|  * |  * | ||||||
|  * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. |  * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier. | ||||||
|  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is |  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is | ||||||
|  * preferable to the vmalloc fallback, due to visible performance drawbacks. |  * preferable to the vmalloc fallback, due to visible performance drawbacks. | ||||||
|  * |  * | ||||||
|  * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not |  | ||||||
|  * fall back to vmalloc. |  | ||||||
|  * |  | ||||||
|  * Return: pointer to the allocated memory of %NULL in case of failure |  * Return: pointer to the allocated memory of %NULL in case of failure | ||||||
|  */ |  */ | ||||||
| void *kvmalloc_node(size_t size, gfp_t flags, int node) | void *kvmalloc_node(size_t size, gfp_t flags, int node) | ||||||
|  | @ -563,13 +560,6 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) | ||||||
| 	gfp_t kmalloc_flags = flags; | 	gfp_t kmalloc_flags = flags; | ||||||
| 	void *ret; | 	void *ret; | ||||||
| 
 | 
 | ||||||
| 	/*
 |  | ||||||
| 	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables) |  | ||||||
| 	 * so the given set of flags has to be compatible. |  | ||||||
| 	 */ |  | ||||||
| 	if ((flags & GFP_KERNEL) != GFP_KERNEL) |  | ||||||
| 		return kmalloc_node(size, flags, node); |  | ||||||
| 
 |  | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * We want to attempt a large physically contiguous block first because | 	 * We want to attempt a large physically contiguous block first because | ||||||
| 	 * it is less likely to fragment multiple larger blocks and therefore | 	 * it is less likely to fragment multiple larger blocks and therefore | ||||||
|  | @ -582,6 +572,9 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) | ||||||
| 
 | 
 | ||||||
| 		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) | 		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) | ||||||
| 			kmalloc_flags |= __GFP_NORETRY; | 			kmalloc_flags |= __GFP_NORETRY; | ||||||
|  | 
 | ||||||
|  | 		/* nofail semantic is implemented by the vmalloc fallback */ | ||||||
|  | 		kmalloc_flags &= ~__GFP_NOFAIL; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	ret = kmalloc_node(size, kmalloc_flags, node); | 	ret = kmalloc_node(size, kmalloc_flags, node); | ||||||
|  |  | ||||||
|  | @ -7,7 +7,7 @@ | ||||||
| 
 | 
 | ||||||
| #include <linux/ceph/buffer.h> | #include <linux/ceph/buffer.h> | ||||||
| #include <linux/ceph/decode.h> | #include <linux/ceph/decode.h> | ||||||
| #include <linux/ceph/libceph.h> /* for ceph_kvmalloc */ | #include <linux/ceph/libceph.h> /* for kvmalloc */ | ||||||
| 
 | 
 | ||||||
| struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | ||||||
| { | { | ||||||
|  | @ -17,7 +17,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | ||||||
| 	if (!b) | 	if (!b) | ||||||
| 		return NULL; | 		return NULL; | ||||||
| 
 | 
 | ||||||
| 	b->vec.iov_base = ceph_kvmalloc(len, gfp); | 	b->vec.iov_base = kvmalloc(len, gfp); | ||||||
| 	if (!b->vec.iov_base) { | 	if (!b->vec.iov_base) { | ||||||
| 		kfree(b); | 		kfree(b); | ||||||
| 		return NULL; | 		return NULL; | ||||||
|  |  | ||||||
|  | @ -190,33 +190,6 @@ int ceph_compare_options(struct ceph_options *new_opt, | ||||||
| } | } | ||||||
| EXPORT_SYMBOL(ceph_compare_options); | EXPORT_SYMBOL(ceph_compare_options); | ||||||
| 
 | 
 | ||||||
| /*
 |  | ||||||
|  * kvmalloc() doesn't fall back to the vmalloc allocator unless flags are |  | ||||||
|  * compatible with (a superset of) GFP_KERNEL.  This is because while the |  | ||||||
|  * actual pages are allocated with the specified flags, the page table pages |  | ||||||
|  * are always allocated with GFP_KERNEL. |  | ||||||
|  * |  | ||||||
|  * ceph_kvmalloc() may be called with GFP_KERNEL, GFP_NOFS or GFP_NOIO. |  | ||||||
|  */ |  | ||||||
| void *ceph_kvmalloc(size_t size, gfp_t flags) |  | ||||||
| { |  | ||||||
| 	void *p; |  | ||||||
| 
 |  | ||||||
| 	if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) { |  | ||||||
| 		p = kvmalloc(size, flags); |  | ||||||
| 	} else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) { |  | ||||||
| 		unsigned int nofs_flag = memalloc_nofs_save(); |  | ||||||
| 		p = kvmalloc(size, GFP_KERNEL); |  | ||||||
| 		memalloc_nofs_restore(nofs_flag); |  | ||||||
| 	} else { |  | ||||||
| 		unsigned int noio_flag = memalloc_noio_save(); |  | ||||||
| 		p = kvmalloc(size, GFP_KERNEL); |  | ||||||
| 		memalloc_noio_restore(noio_flag); |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return p; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static int parse_fsid(const char *str, struct ceph_fsid *fsid) | static int parse_fsid(const char *str, struct ceph_fsid *fsid) | ||||||
| { | { | ||||||
| 	int i = 0; | 	int i = 0; | ||||||
|  |  | ||||||
|  | @ -147,7 +147,7 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key) | ||||||
| static const u8 *aes_iv = (u8 *)CEPH_AES_IV; | static const u8 *aes_iv = (u8 *)CEPH_AES_IV; | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  * Should be used for buffers allocated with ceph_kvmalloc(). |  * Should be used for buffers allocated with kvmalloc(). | ||||||
|  * Currently these are encrypt out-buffer (ceph_buffer) and decrypt |  * Currently these are encrypt out-buffer (ceph_buffer) and decrypt | ||||||
|  * in-buffer (msg front). |  * in-buffer (msg front). | ||||||
|  * |  * | ||||||
|  |  | ||||||
|  | @ -1920,7 +1920,7 @@ struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, | ||||||
| 
 | 
 | ||||||
| 	/* front */ | 	/* front */ | ||||||
| 	if (front_len) { | 	if (front_len) { | ||||||
| 		m->front.iov_base = ceph_kvmalloc(front_len, flags); | 		m->front.iov_base = kvmalloc(front_len, flags); | ||||||
| 		if (m->front.iov_base == NULL) { | 		if (m->front.iov_base == NULL) { | ||||||
| 			dout("ceph_msg_new can't allocate %d bytes\n", | 			dout("ceph_msg_new can't allocate %d bytes\n", | ||||||
| 			     front_len); | 			     front_len); | ||||||
|  |  | ||||||
|  | @ -308,7 +308,7 @@ static void *alloc_conn_buf(struct ceph_connection *con, int len) | ||||||
| 	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs))) | 	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs))) | ||||||
| 		return NULL; | 		return NULL; | ||||||
| 
 | 
 | ||||||
| 	buf = ceph_kvmalloc(len, GFP_NOIO); | 	buf = kvmalloc(len, GFP_NOIO); | ||||||
| 	if (!buf) | 	if (!buf) | ||||||
| 		return NULL; | 		return NULL; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -980,7 +980,7 @@ static struct crush_work *alloc_workspace(const struct crush_map *c) | ||||||
| 	work_size = crush_work_size(c, CEPH_PG_MAX_SIZE); | 	work_size = crush_work_size(c, CEPH_PG_MAX_SIZE); | ||||||
| 	dout("%s work_size %zu bytes\n", __func__, work_size); | 	dout("%s work_size %zu bytes\n", __func__, work_size); | ||||||
| 
 | 
 | ||||||
| 	work = ceph_kvmalloc(work_size, GFP_NOIO); | 	work = kvmalloc(work_size, GFP_NOIO); | ||||||
| 	if (!work) | 	if (!work) | ||||||
| 		return NULL; | 		return NULL; | ||||||
| 
 | 
 | ||||||
|  | @ -1190,9 +1190,9 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max) | ||||||
| 	if (max == map->max_osd) | 	if (max == map->max_osd) | ||||||
| 		return 0; | 		return 0; | ||||||
| 
 | 
 | ||||||
| 	state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS); | 	state = kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS); | ||||||
| 	weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS); | 	weight = kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS); | ||||||
| 	addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS); | 	addr = kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS); | ||||||
| 	if (!state || !weight || !addr) { | 	if (!state || !weight || !addr) { | ||||||
| 		kvfree(state); | 		kvfree(state); | ||||||
| 		kvfree(weight); | 		kvfree(weight); | ||||||
|  | @ -1222,7 +1222,7 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max) | ||||||
| 	if (map->osd_primary_affinity) { | 	if (map->osd_primary_affinity) { | ||||||
| 		u32 *affinity; | 		u32 *affinity; | ||||||
| 
 | 
 | ||||||
| 		affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)), | 		affinity = kvmalloc(array_size(max, sizeof(*affinity)), | ||||||
| 					 GFP_NOFS); | 					 GFP_NOFS); | ||||||
| 		if (!affinity) | 		if (!affinity) | ||||||
| 			return -ENOMEM; | 			return -ENOMEM; | ||||||
|  | @ -1503,7 +1503,7 @@ static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff) | ||||||
| 	if (!map->osd_primary_affinity) { | 	if (!map->osd_primary_affinity) { | ||||||
| 		int i; | 		int i; | ||||||
| 
 | 
 | ||||||
| 		map->osd_primary_affinity = ceph_kvmalloc( | 		map->osd_primary_affinity = kvmalloc( | ||||||
| 		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)), | 		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)), | ||||||
| 		    GFP_NOFS); | 		    GFP_NOFS); | ||||||
| 		if (!map->osd_primary_affinity) | 		if (!map->osd_primary_affinity) | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Michal Hocko
						Michal Hocko