mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 08:38:45 +02:00 
			
		
		
		
	 be7052a4b5
			
		
	
	
		be7052a4b5
		
	
	
	
	
		
			
			Convert net.c to use struct iou_vec. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/6437b57dabed44eca708c02e390529c7ed211c78.1741362889.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
		
			
				
	
	
		
			68 lines
		
	
	
	
		
			1.5 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			68 lines
		
	
	
	
		
			1.5 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

/*
 * Release every cached entry via @free and tear down the cache's
 * backing storage (implemented in alloc_cache.c).
 */
void io_alloc_cache_free(struct io_alloc_cache *cache,
			 void (*free)(const void *));
/*
 * Set up @cache to hold up to @max_nr entries of @size bytes each;
 * @init_bytes is the prefix cleared on KASAN builds when an entry is
 * handed out (see io_alloc_cache_get()).
 * NOTE(review): the bool return convention (success vs. failure) is not
 * visible in this header — confirm against alloc_cache.c before relying
 * on it.
 */
bool io_alloc_cache_init(struct io_alloc_cache *cache,
			 unsigned max_nr, unsigned int size,
			 unsigned int init_bytes);

/* Slow path: allocate a brand-new element when the cache is empty. */
void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
 | |
| static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 | |
| 				      void *entry)
 | |
| {
 | |
| 	if (cache->nr_cached < cache->max_cached) {
 | |
| 		if (!kasan_mempool_poison_object(entry))
 | |
| 			return false;
 | |
| 		cache->entries[cache->nr_cached++] = entry;
 | |
| 		return true;
 | |
| 	}
 | |
| 	return false;
 | |
| }
 | |
| 
 | |
| static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
 | |
| {
 | |
| 	if (cache->nr_cached) {
 | |
| 		void *entry = cache->entries[--cache->nr_cached];
 | |
| 
 | |
| 		/*
 | |
| 		 * If KASAN is enabled, always clear the initial bytes that
 | |
| 		 * must be zeroed post alloc, in case any of them overlap
 | |
| 		 * with KASAN storage.
 | |
| 		 */
 | |
| #if defined(CONFIG_KASAN)
 | |
| 		kasan_mempool_unpoison_object(entry, cache->elem_size);
 | |
| 		if (cache->init_clear)
 | |
| 			memset(entry, 0, cache->init_clear);
 | |
| #endif
 | |
| 		return entry;
 | |
| 	}
 | |
| 
 | |
| 	return NULL;
 | |
| }
 | |
| 
 | |
| static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
 | |
| {
 | |
| 	void *obj;
 | |
| 
 | |
| 	obj = io_alloc_cache_get(cache);
 | |
| 	if (obj)
 | |
| 		return obj;
 | |
| 	return io_cache_alloc_new(cache, gfp);
 | |
| }
 | |
| 
 | |
/*
 * Return @obj to @cache for reuse; if the cache refuses it (full, or
 * KASAN rejected it), hand it back to the allocator.
 */
static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
{
	if (io_alloc_cache_put(cache, obj))
		return;
	kfree(obj);
}
 | |
| 
 | |
| #endif
 |