btrfs: introduce submit_eb_subpage() to submit a subpage metadata page

The new function, submit_eb_subpage(), will submit all the dirty extent
buffers in the page.

The major difference between submit_eb_page() and submit_eb_subpage() is:

- How to grab the extent buffer
  We now use find_extent_buffer_nospinlock() instead of going through
  page::private.

All other differences are already handled in functions like
lock_extent_buffer_for_io() and write_one_eb().

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent f3156df944
commit c4aec299fa

1 changed file with 95 additions and 0 deletions
@@ -4322,6 +4322,98 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	return ret;
 }
 
+/*
+ * Submit one subpage btree page.
+ *
+ * The main difference to submit_eb_page() is:
+ * - Page locking
+ *   For subpage, we don't rely on page locking at all.
+ *
+ * - Flush write bio
+ *   We only flush bio if we may be unable to fit current extent buffers into
+ *   current bio.
+ *
+ * Return >=0 for the number of submitted extent buffers.
+ * Return <0 for fatal error.
+ */
+static int submit_eb_subpage(struct page *page,
+			     struct writeback_control *wbc,
+			     struct extent_page_data *epd)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+	int submitted = 0;
+	u64 page_start = page_offset(page);
+	int bit_start = 0;
+	const int nbits = BTRFS_SUBPAGE_BITMAP_SIZE;
+	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
+	int ret;
+
+	/* Lock and write each dirty extent buffers in the range */
+	while (bit_start < nbits) {
+		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+		struct extent_buffer *eb;
+		unsigned long flags;
+		u64 start;
+
+		/*
+		 * Take private lock to ensure the subpage won't be detached
+		 * in the meantime.
+		 */
+		spin_lock(&page->mapping->private_lock);
+		if (!PagePrivate(page)) {
+			spin_unlock(&page->mapping->private_lock);
+			break;
+		}
+		spin_lock_irqsave(&subpage->lock, flags);
+		if (!((1 << bit_start) & subpage->dirty_bitmap)) {
+			spin_unlock_irqrestore(&subpage->lock, flags);
+			spin_unlock(&page->mapping->private_lock);
+			bit_start++;
+			continue;
+		}
+
+		start = page_start + bit_start * fs_info->sectorsize;
+		bit_start += sectors_per_node;
+
+		/*
+		 * Here we just want to grab the eb without touching extra
+		 * spin locks, so call find_extent_buffer_nolock().
+		 */
+		eb = find_extent_buffer_nolock(fs_info, start);
+		spin_unlock_irqrestore(&subpage->lock, flags);
+		spin_unlock(&page->mapping->private_lock);
+
+		/*
+		 * The eb has already reached 0 refs thus find_extent_buffer()
+		 * doesn't return it. We don't need to write back such eb
+		 * anyway.
+		 */
+		if (!eb)
+			continue;
+
+		ret = lock_extent_buffer_for_io(eb, epd);
+		if (ret == 0) {
+			free_extent_buffer(eb);
+			continue;
+		}
+		if (ret < 0) {
+			free_extent_buffer(eb);
+			goto cleanup;
+		}
+		ret = write_one_eb(eb, wbc, epd);
+		free_extent_buffer(eb);
+		if (ret < 0)
+			goto cleanup;
+		submitted++;
+	}
+	return submitted;
+
+cleanup:
+	/* We hit error, end bio for the submitted extent buffers */
+	end_write_bio(epd, ret);
+	return ret;
+}
+
 /*
  * Submit all page(s) of one extent buffer.
  *
@@ -4354,6 +4446,9 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 	if (!PagePrivate(page))
 		return 0;
 
+	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+		return submit_eb_subpage(page, wbc, epd);
+
 	spin_lock(&mapping->private_lock);
 	if (!PagePrivate(page)) {
 		spin_unlock(&mapping->private_lock);
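For reference, below is a minimal user-space sketch of the dirty-bitmap walk that submit_eb_subpage() performs, assuming a typical subpage configuration of 64K pages, 4K sectorsize and 16K nodesize (so the per-page bitmap has 16 bits and each extent buffer covers 4 of them). The page_start value and the dirty_bitmap contents here are made up for illustration; in the kernel they come from page_offset(page) and the btrfs_subpage dirty bitmap.

/*
 * Standalone illustration of the bitmap walk in submit_eb_subpage().
 * Assumed configuration: 64K page, 4K sectorsize, 16K nodesize.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_start = 0x100000;	/* made-up page offset */
	const unsigned int sectorsize = 4096;
	const unsigned int nodesize = 16384;
	const int nbits = 16;				/* 64K page / 4K sectors */
	const int sectors_per_node = nodesize / sectorsize;
	/* Pretend sectors 4-7 and 12-15 are dirty (two extent buffers). */
	const unsigned int dirty_bitmap = 0xf0f0;
	int bit_start = 0;

	while (bit_start < nbits) {
		if (!((1u << bit_start) & dirty_bitmap)) {
			bit_start++;
			continue;
		}
		/* Same arithmetic as the kernel code: bit index -> eb start. */
		printf("eb start = 0x%lx\n",
		       page_start + (unsigned long)bit_start * sectorsize);
		bit_start += sectors_per_node;
	}
	return 0;
}

With the bitmap 0xf0f0 this prints eb start = 0x104000 and eb start = 0x10c000, i.e. the two nodesize-aligned extent buffers inside the page; that start value is what the kernel code passes to find_extent_buffer_nolock() before locking and writing each buffer.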