	bcachefs: Use kvpmalloc mempools for compression bounce
This fixes an issue where mounting would fail because of memory
fragmentation - previously the compression bounce buffers were using
get_free_pages().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 5a655f06c9
commit 22f776985f

1 changed file with 5 additions and 20 deletions
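For background: the bounce buffers here are large allocations, and carving them out of physically contiguous pages with get_free_pages() is exactly what fails once memory is fragmented. A kvpmalloc mempool instead keeps a reserve element per pool and backs new elements with an allocation that may fall back to vmalloc(), so pool setup and refill no longer depend on finding contiguous pages. Below is a minimal sketch of that idea using the generic mempool API with kvmalloc()/kvfree() callbacks; the helper the patch actually calls, mempool_init_kvpmalloc_pool(), is bcachefs's own wrapper around its kvpmalloc allocator, and the names and element size in this sketch are hypothetical, not bcachefs code.

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical element size standing in for the large bounce buffers;
 * not a bcachefs constant. */
#define BOUNCE_BUF_SIZE		(1UL << 17)

static void *bounce_elem_alloc(gfp_t gfp_mask, void *pool_data)
{
	/*
	 * kvmalloc() tries a physically contiguous allocation first and,
	 * for GFP_KERNEL-compatible masks, falls back to vmalloc(), so
	 * refilling the pool does not require large contiguous pages.
	 */
	return kvmalloc(BOUNCE_BUF_SIZE, gfp_mask);
}

static void bounce_elem_free(void *element, void *pool_data)
{
	kvfree(element);
}

static int bounce_pool_init(mempool_t *pool)
{
	/* min_nr = 1: keep one buffer in reserve, as the pools below do. */
	return mempool_init(pool, 1, bounce_elem_alloc, bounce_elem_free, NULL);
}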
fs/bcachefs/compress.c
@@ -17,7 +17,6 @@ struct bbuf {
 		BB_NONE,
 		BB_VMAP,
 		BB_KMALLOC,
-		BB_VMALLOC,
 		BB_MEMPOOL,
 	}		type;
 	int		rw;
@@ -33,17 +32,7 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
 	if (b)
 		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
 
-	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOWAIT);
-	b = b ? page_address(b) : NULL;
-	if (b)
-		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
-
-	b = vmalloc(size);
-	if (b)
-		return (struct bbuf) { .b = b, .type = BB_VMALLOC, .rw = rw };
-
 	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
-	b = b ? page_address(b) : NULL;
 	if (b)
 		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
 
@@ -129,12 +118,8 @@ static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
 	case BB_KMALLOC:
 		kfree(buf.b);
 		break;
-	case BB_VMALLOC:
-		vfree(buf.b);
-		break;
 	case BB_MEMPOOL:
-		mempool_free(virt_to_page(buf.b),
-			     &c->compression_bounce[buf.rw]);
+		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
 		break;
 	}
 }
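Taken together, the allocation and free hunks above leave a simple two-step pattern: try a plain kmalloc() first, then fall back to the mempool, whose GFP_NOIO mempool_alloc() can sleep for the reserve element but will not fail; on the way out, mempool elements are returned by pointer, with no virt_to_page() conversion, because they are no longer page allocations. A hedged sketch of that pattern, reusing the hypothetical bounce_pool helpers from the earlier example rather than the real bcachefs types:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical tags mirroring BB_KMALLOC / BB_MEMPOOL in the patch. */
enum bounce_type { BOUNCE_KMALLOC, BOUNCE_MEMPOOL };

struct bounce {
	void			*b;
	enum bounce_type	type;
};

/* 'size' must not exceed the pool's element size for the fallback to fit. */
static struct bounce bounce_alloc(mempool_t *pool, size_t size)
{
	void *b;

	/* Cheap attempt first: plain kmalloc(), silent on failure. */
	b = kmalloc(size, GFP_NOIO|__GFP_NOWARN);
	if (b)
		return (struct bounce) { .b = b, .type = BOUNCE_KMALLOC };

	/*
	 * Guaranteed fallback: with a sleeping gfp mask, mempool_alloc()
	 * waits for the reserve element instead of returning NULL.
	 */
	b = mempool_alloc(pool, GFP_NOIO);
	return (struct bounce) { .b = b, .type = BOUNCE_MEMPOOL };
}

static void bounce_free(mempool_t *pool, struct bounce buf)
{
	switch (buf.type) {
	case BOUNCE_KMALLOC:
		kfree(buf.b);
		break;
	case BOUNCE_MEMPOOL:
		/* Elements are kvmalloc'd buffers, freed back by pointer. */
		mempool_free(buf.b, pool);
		break;
	}
}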
@@ -561,15 +546,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 have_compressed:
 
 	if (!mempool_initialized(&c->compression_bounce[READ])) {
-		ret = mempool_init_page_pool(&c->compression_bounce[READ],
-					     1, order);
+		ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+						  1, order);
 		if (ret)
 			goto out;
 	}
 
 	if (!mempool_initialized(&c->compression_bounce[WRITE])) {
-		ret = mempool_init_page_pool(&c->compression_bounce[WRITE],
-					     1, order);
+		ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+						  1, order);
 		if (ret)
 			goto out;
 	}