mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 16:48:26 +02:00 
			
		
		
		
	btrfs: compression: drop kmap/kunmap from lzo
As we don't use highmem pages anymore, drop the kmap/kunmap. The kmap is simply page_address and kunmap is a no-op. Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
		
							parent
							
								
									b0ee5e1ec4
								
							
						
					
					
						commit
						8c945d32e6
					
				
					 1 changed file with 10 additions and 28 deletions
				
			
		|  | @ -140,7 +140,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, | |||
| 	*total_in = 0; | ||||
| 
 | ||||
| 	in_page = find_get_page(mapping, start >> PAGE_SHIFT); | ||||
| 	data_in = kmap(in_page); | ||||
| 	data_in = page_address(in_page); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * store the size of all chunks of compressed data in | ||||
|  | @ -151,7 +151,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, | |||
| 		ret = -ENOMEM; | ||||
| 		goto out; | ||||
| 	} | ||||
| 	cpage_out = kmap(out_page); | ||||
| 	cpage_out = page_address(out_page); | ||||
| 	out_offset = LZO_LEN; | ||||
| 	tot_out = LZO_LEN; | ||||
| 	pages[0] = out_page; | ||||
|  | @ -209,7 +209,6 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, | |||
| 				if (out_len == 0 && tot_in >= len) | ||||
| 					break; | ||||
| 
 | ||||
| 				kunmap(out_page); | ||||
| 				if (nr_pages == nr_dest_pages) { | ||||
| 					out_page = NULL; | ||||
| 					ret = -E2BIG; | ||||
|  | @ -221,7 +220,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, | |||
| 					ret = -ENOMEM; | ||||
| 					goto out; | ||||
| 				} | ||||
| 				cpage_out = kmap(out_page); | ||||
| 				cpage_out = page_address(out_page); | ||||
| 				pages[nr_pages++] = out_page; | ||||
| 
 | ||||
| 				pg_bytes_left = PAGE_SIZE; | ||||
|  | @ -243,12 +242,11 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, | |||
| 			break; | ||||
| 
 | ||||
| 		bytes_left = len - tot_in; | ||||
| 		kunmap(in_page); | ||||
| 		put_page(in_page); | ||||
| 
 | ||||
| 		start += PAGE_SIZE; | ||||
| 		in_page = find_get_page(mapping, start >> PAGE_SHIFT); | ||||
| 		data_in = kmap(in_page); | ||||
| 		data_in = page_address(in_page); | ||||
| 		in_len = min(bytes_left, PAGE_SIZE); | ||||
| 	} | ||||
| 
 | ||||
|  | @ -258,22 +256,17 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, | |||
| 	} | ||||
| 
 | ||||
| 	/* store the size of all chunks of compressed data */ | ||||
| 	sizes_ptr = kmap_local_page(pages[0]); | ||||
| 	sizes_ptr = page_address(pages[0]); | ||||
| 	write_compress_length(sizes_ptr, tot_out); | ||||
| 	kunmap_local(sizes_ptr); | ||||
| 
 | ||||
| 	ret = 0; | ||||
| 	*total_out = tot_out; | ||||
| 	*total_in = tot_in; | ||||
| out: | ||||
| 	*out_pages = nr_pages; | ||||
| 	if (out_page) | ||||
| 		kunmap(out_page); | ||||
| 
 | ||||
| 	if (in_page) { | ||||
| 		kunmap(in_page); | ||||
| 	if (in_page) | ||||
| 		put_page(in_page); | ||||
| 	} | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
|  | @ -299,12 +292,11 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) | |||
| 	unsigned long tot_out; | ||||
| 	unsigned long tot_len; | ||||
| 	char *buf; | ||||
| 	bool may_late_unmap, need_unmap; | ||||
| 	struct page **pages_in = cb->compressed_pages; | ||||
| 	u64 disk_start = cb->start; | ||||
| 	struct bio *orig_bio = cb->orig_bio; | ||||
| 
 | ||||
| 	data_in = kmap(pages_in[0]); | ||||
| 	data_in = page_address(pages_in[0]); | ||||
| 	tot_len = read_compress_length(data_in); | ||||
| 	/*
 | ||||
| 	 * Compressed data header check. | ||||
|  | @ -345,13 +337,11 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) | |||
| 
 | ||||
| 		tot_in += in_len; | ||||
| 		working_bytes = in_len; | ||||
| 		may_late_unmap = need_unmap = false; | ||||
| 
 | ||||
| 		/* fast path: avoid using the working buffer */ | ||||
| 		if (in_page_bytes_left >= in_len) { | ||||
| 			buf = data_in + in_offset; | ||||
| 			bytes = in_len; | ||||
| 			may_late_unmap = true; | ||||
| 			goto cont; | ||||
| 		} | ||||
| 
 | ||||
|  | @ -381,12 +371,8 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) | |||
| 					goto done; | ||||
| 				} | ||||
| 
 | ||||
| 				if (may_late_unmap) | ||||
| 					need_unmap = true; | ||||
| 				else | ||||
| 					kunmap(pages_in[page_in_index]); | ||||
| 
 | ||||
| 				data_in = kmap(pages_in[++page_in_index]); | ||||
| 				page_in_index++; | ||||
| 				data_in = page_address(pages_in[page_in_index]); | ||||
| 
 | ||||
| 				in_page_bytes_left = PAGE_SIZE; | ||||
| 				in_offset = 0; | ||||
|  | @ -396,8 +382,6 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) | |||
| 		out_len = max_segment_len; | ||||
| 		ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, | ||||
| 					    &out_len); | ||||
| 		if (need_unmap) | ||||
| 			kunmap(pages_in[page_in_index - 1]); | ||||
| 		if (ret != LZO_E_OK) { | ||||
| 			pr_warn("BTRFS: decompress failed\n"); | ||||
| 			ret = -EIO; | ||||
|  | @ -413,7 +397,6 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) | |||
| 			break; | ||||
| 	} | ||||
| done: | ||||
| 	kunmap(pages_in[page_in_index]); | ||||
| 	if (!ret) | ||||
| 		zero_fill_bio(orig_bio); | ||||
| 	return ret; | ||||
|  | @ -466,7 +449,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in, | |||
| 	destlen = min_t(unsigned long, destlen, PAGE_SIZE); | ||||
| 	bytes = min_t(unsigned long, destlen, out_len - start_byte); | ||||
| 
 | ||||
| 	kaddr = kmap_local_page(dest_page); | ||||
| 	kaddr = page_address(dest_page); | ||||
| 	memcpy(kaddr, workspace->buf + start_byte, bytes); | ||||
| 
 | ||||
| 	/*
 | ||||
|  | @ -476,7 +459,6 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in, | |||
| 	 */ | ||||
| 	if (bytes < destlen) | ||||
| 		memset(kaddr+bytes, 0, destlen-bytes); | ||||
| 	kunmap_local(kaddr); | ||||
| out: | ||||
| 	return ret; | ||||
| } | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 David Sterba
						David Sterba