Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)

	btrfs: zoned: optimize hint byte for zoned allocator
Writing sequentially to a huge file on btrfs on an SMR HDD revealed a
decline in performance (from 220 MiB/s to 30 MiB/s after 500 minutes).
The performance drops because of the increased latency of extent
allocation, which is caused by traversing a large number of full block
groups.

So, this patch optimizes ffe_ctl->hint_byte by choosing a block group
with sufficient free space from the list of active block groups, which
does not contain full block groups.

After applying the patch, the performance is maintained well.
Fixes: 2eda57089e ("btrfs: zoned: implement sequential extent allocation")
CC: stable@vger.kernel.org # 5.15+
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
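
To make the selection logic easier to follow outside the kernel tree, below is a minimal user-space sketch of the same idea. struct zbg, the BG_DATA flag, and pick_hint() are hypothetical stand-ins, not kernel APIs; they only model the fields the patch reads (start, zone_capacity, alloc_offset, flags). The actual change walks fs_info->zone_active_bgs under zone_active_bgs_lock, as shown in the diff below.

/*
 * Minimal user-space sketch (not kernel code): pick an allocation hint from
 * "active" block groups by remaining capacity.  struct zbg, BG_DATA and
 * pick_hint() are hypothetical stand-ins for the kernel structures.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct zbg {                            /* hypothetical, simplified block group */
	uint64_t start;                 /* logical start of the block group     */
	uint64_t zone_capacity;         /* usable bytes in the zone             */
	uint64_t alloc_offset;          /* bytes already allocated              */
	unsigned int flags;             /* allocation profile bits              */
};

#define BG_DATA 0x1u                    /* stand-in for BTRFS_BLOCK_GROUP_DATA */

/*
 * Return the start of the first block group whose remaining space can hold
 * num_bytes and whose flags match; 0 means "no hint found".
 */
static uint64_t pick_hint(const struct zbg *bgs, size_t nr,
			  unsigned int flags, uint64_t num_bytes)
{
	for (size_t i = 0; i < nr; i++) {
		/* Same computation as the patch: capacity minus alloc offset. */
		uint64_t avail = bgs[i].zone_capacity - bgs[i].alloc_offset;

		if ((bgs[i].flags & flags) && avail >= num_bytes)
			return bgs[i].start;
	}
	return 0;
}

int main(void)
{
	/* Two (nearly) full block groups followed by one with room left. */
	const struct zbg active[] = {
		{ .start = 1u << 20,    .zone_capacity = 256u << 20,
		  .alloc_offset = 256u << 20, .flags = BG_DATA },
		{ .start = 512u << 20,  .zone_capacity = 256u << 20,
		  .alloc_offset = 255u << 20, .flags = BG_DATA },
		{ .start = 1024u << 20, .zone_capacity = 256u << 20,
		  .alloc_offset = 16u << 20,  .flags = BG_DATA },
	};
	uint64_t hint = pick_hint(active, sizeof(active) / sizeof(active[0]),
				  BG_DATA, 8u << 20);

	printf("hint byte: %llu\n", (unsigned long long)hint);
	return 0;
}

Only the last block group has at least 8 MiB of remaining capacity, so the sketch prints its start as the hint; the first two are skipped, just like the full block groups the patch avoids traversing.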
			
			
parent b271fee9a4
commit 02444f2ac2

1 changed file with 18 additions and 0 deletions
@@ -4311,6 +4311,24 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
 		if (fs_info->data_reloc_bg)
 			ffe_ctl->hint_byte = fs_info->data_reloc_bg;
 		spin_unlock(&fs_info->relocation_bg_lock);
+	} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+		struct btrfs_block_group *block_group;
+
+		spin_lock(&fs_info->zone_active_bgs_lock);
+		list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+			/*
+			 * No lock is OK here because avail is monotonically
+			 * decreasing, and this is just a hint.
+			 */
+			u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+			if (block_group_bits(block_group, ffe_ctl->flags) &&
+			    avail >= ffe_ctl->num_bytes) {
+				ffe_ctl->hint_byte = block_group->start;
+				break;
+			}
+		}
+		spin_unlock(&fs_info->zone_active_bgs_lock);
 	}
 
 	return 0;