	mm/truncate: batch-clear shadow entries
Make clear_shadow_entry() clear shadow entries in `struct folio_batch` so
that it can reduce contention on i_lock and i_pages locks, e.g.,
  watchdog: BUG: soft lockup - CPU#29 stuck for 11s! [fio:2701649]
    clear_shadow_entry+0x3d/0x100
    mapping_try_invalidate+0x117/0x1d0
    invalidate_mapping_pages+0x10/0x20
    invalidate_bdev+0x3c/0x50
    blkdev_common_ioctl+0x5f7/0xa90
    blkdev_ioctl+0x109/0x270
Also, rename clear_shadow_entry() to clear_shadow_entries() accordingly.
[yuzhao@google.com: v2]
  Link: https://lkml.kernel.org/r/20240710060933.3979380-1-yuzhao@google.com
Link: https://lkml.kernel.org/r/20240708212753.3120511-1-yuzhao@google.com
Reported-by: Bharata B Rao <bharata@amd.com>
Closes: https://lore.kernel.org/d2841226-e27b-4d3d-a578-63587a3aa4f3@amd.com/
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
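
To see why batching the clears matters, here is a minimal userspace sketch in plain C with pthreads. It is an illustration of the locking pattern only, not kernel code: shadow_batch, BATCH_SIZE, clear_one() and clear_batch() are made-up names standing in for struct folio_batch, __clear_shadow_entry() and the i_lock/i_pages locking. The old path paid one lock round-trip per shadow entry; the new clear_shadow_entries() pays one per batch, which is the contention the lockup report above points at.

/*
 * Minimal userspace sketch of the batching idea -- not kernel code.
 * All names here are hypothetical stand-ins for the kernel's
 * struct folio_batch, __clear_shadow_entry() and i_lock/i_pages.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_SIZE 15	/* arbitrary batch capacity for the sketch */

static pthread_mutex_t pages_lock = PTHREAD_MUTEX_INITIALIZER;

struct shadow_batch {
	unsigned int nr;			/* entries in use */
	unsigned long indices[BATCH_SIZE];	/* page-cache indices */
};

/* Old pattern: one lock round-trip per shadow entry. */
static void clear_one(unsigned long index)
{
	pthread_mutex_lock(&pages_lock);
	printf("clear shadow entry at index %lu\n", index);
	pthread_mutex_unlock(&pages_lock);
}

/* New pattern: one lock round-trip covers the whole batch. */
static void clear_batch(const struct shadow_batch *batch)
{
	pthread_mutex_lock(&pages_lock);
	for (unsigned int i = 0; i < batch->nr; i++)
		printf("clear shadow entry at index %lu\n",
		       batch->indices[i]);
	pthread_mutex_unlock(&pages_lock);
}

int main(void)
{
	struct shadow_batch batch = { .nr = 3, .indices = { 10, 11, 12 } };

	clear_one(9);		/* N entries cost N round-trips */
	clear_batch(&batch);	/* N entries cost one round-trip */
	return 0;
}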
			
			
commit 61c663e020
parent 8a78882dac

1 changed file with 31 additions and 37 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
@@ -39,12 +39,25 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 	xas_store(&xas, NULL);
 }
 
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
-			       void *entry)
+static void clear_shadow_entries(struct address_space *mapping,
+				 struct folio_batch *fbatch, pgoff_t *indices)
 {
+	int i;
+
+	/* Handled by shmem itself, or for DAX we do nothing. */
+	if (shmem_mapping(mapping) || dax_mapping(mapping))
+		return;
+
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
-	__clear_shadow_entry(mapping, index, entry);
+
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
+
+		if (xa_is_value(folio))
+			__clear_shadow_entry(mapping, indices[i], folio);
+	}
+
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
@@ -105,36 +118,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 	fbatch->nr = j;
 }
 
-/*
- * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages().
- */
-static int invalidate_exceptional_entry(struct address_space *mapping,
-					pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself, or for DAX we do nothing. */
-	if (shmem_mapping(mapping) || dax_mapping(mapping))
-		return 1;
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
-/*
- * Invalidate exceptional entry if clean. This handles exceptional entries for
- * invalidate_inode_pages2() so for DAX it evicts only clean entries.
- */
-static int invalidate_exceptional_entry2(struct address_space *mapping,
-					 pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself */
-	if (shmem_mapping(mapping))
-		return 1;
-	if (dax_mapping(mapping))
-		return dax_invalidate_mapping_entry_sync(mapping, index);
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
 /**
  * folio_invalidate - Invalidate part or all of a folio.
  * @folio: The folio which is affected.
@@ -494,6 +477,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 	unsigned long ret;
 	unsigned long count = 0;
 	int i;
+	bool xa_has_values = false;
 
 	folio_batch_init(&fbatch);
 	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
@@ -503,8 +487,8 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 			/* We rely upon deletion not changing folio->index */
 
 			if (xa_is_value(folio)) {
-				count += invalidate_exceptional_entry(mapping,
-							     indices[i], folio);
+				xa_has_values = true;
+				count++;
 				continue;
 			}
 
@@ -522,6 +506,10 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 			}
 			count += ret;
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, &fbatch, indices);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
@@ -616,6 +604,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int ret = 0;
 	int ret2 = 0;
 	int did_range_unmap = 0;
+	bool xa_has_values = false;
 
 	if (mapping_empty(mapping))
 		return 0;
@@ -629,8 +618,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			/* We rely upon deletion not changing folio->index */
 
 			if (xa_is_value(folio)) {
-				if (!invalidate_exceptional_entry2(mapping,
-						indices[i], folio))
+				xa_has_values = true;
+				if (dax_mapping(mapping) &&
+				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
 					ret = -EBUSY;
 				continue;
 			}
@@ -666,6 +656,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				ret = ret2;
 			folio_unlock(folio);
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, &fbatch, indices);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();