mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 10:40:15 +02:00
	mm: swap: fix vmstats for huge pages
Many of the callbacks called by pagevec_lru_move_fn() do not correctly update the vmstats for huge pages. Fix that. Also make __pagevec_lru_add_fn() use the irq-unsafe alternative to update the stats, as irqs are already disabled there.

Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182916.249910-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
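To illustrate the accounting problem being fixed, here is a minimal userspace C sketch, not kernel code: vm_events[], struct page, page_nr_pages() and NR_BASE_PAGES_IN_THP are illustrative stand-ins for the kernel's vmstat counters, struct page, hpage_nr_pages() and the THP subpage count. Bumping a counter by one per callback undercounts a huge page; adding the number of base pages it covers keeps the stats in base-page units.

/*
 * Userspace model only; none of these names are kernel APIs.
 */
#include <stdio.h>

enum vm_event { PGACTIVATE, PGDEACTIVATE, NR_VM_EVENTS };

static unsigned long vm_events[NR_VM_EVENTS];

/* A 2 MiB THP covers 512 base pages on x86-64 (illustrative constant). */
#define NR_BASE_PAGES_IN_THP 512

struct page { int is_huge; };

/* Stand-in for hpage_nr_pages(): base pages represented by this page. */
static int page_nr_pages(const struct page *page)
{
	return page->is_huge ? NR_BASE_PAGES_IN_THP : 1;
}

int main(void)
{
	struct page thp = { .is_huge = 1 };

	/* Old accounting: one event per call, regardless of page size. */
	vm_events[PGACTIVATE] += 1;

	/* Fixed accounting: one event per base page the huge page covers. */
	vm_events[PGDEACTIVATE] += page_nr_pages(&thp);

	printf("old-style count for a THP:  %lu\n", vm_events[PGACTIVATE]);
	printf("fixed count for a THP:      %lu\n", vm_events[PGDEACTIVATE]);
	return 0;
}

The second part of the change is separate: in __pagevec_lru_add_fn() the commit also switches count_vm_event() to __count_vm_events(), which the commit message describes as the irq-unsafe alternative; that is safe at this call site because interrupts are already disabled there.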
This commit is contained in:

parent d483a5dd00
commit 5d91f31faf

1 changed file with 8 additions and 6 deletions
		
							
								
								
									
 mm/swap.c | 14 ++++++++------
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
 		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-		(*pgmoved)++;
+		(*pgmoved) += hpage_nr_pages(page);
 	}
 }
 
@@ -327,7 +327,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 		add_page_to_lru_list(page, lruvec, lru);
 		trace_mm_lru_activate(page);
 
-		__count_vm_event(PGACTIVATE);
+		__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
 	}
 }
 
@@ -529,6 +529,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
 	int lru;
 	bool active;
+	int nr_pages = hpage_nr_pages(page);
 
 	if (!PageLRU(page))
 		return;
@@ -561,11 +562,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		 * We moves tha page into tail of inactive.
 		 */
 		add_page_to_lru_list_tail(page, lruvec, lru);
-		__count_vm_event(PGROTATED);
+		__count_vm_events(PGROTATED, nr_pages);
 	}
 
 	if (active)
-		__count_vm_event(PGDEACTIVATE);
+		__count_vm_events(PGDEACTIVATE, nr_pages);
 }
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -960,6 +961,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
 	enum lru_list lru;
 	int was_unevictable = TestClearPageUnevictable(page);
+	int nr_pages = hpage_nr_pages(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -995,13 +997,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	if (page_evictable(page)) {
 		lru = page_lru(page);
 		if (was_unevictable)
-			count_vm_event(UNEVICTABLE_PGRESCUED);
+			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 	} else {
 		lru = LRU_UNEVICTABLE;
 		ClearPageActive(page);
 		SetPageUnevictable(page);
 		if (!was_unevictable)
-			count_vm_event(UNEVICTABLE_PGCULLED);
+			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
 	}
 
 	add_page_to_lru_list(page, lruvec, lru);