KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed

Currently, kvm_mmu_zap_page() returns only the number of freed child sps. This can confuse the caller, because the caller does not know how many pages were actually freed. Let's make kvm_mmu_zap_page() return the number of pages it actually freed.

Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
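To see why the old convention was error-prone, here is a minimal userspace sketch (not kernel code; zap_page_old(), zap_page_new() and the child count are hypothetical stand-ins for kvm_mmu_zap_page()): under the old convention every caller has to remember an extra decrement for the zapped page itself, while under the new convention the return value can be subtracted directly.

#include <stdio.h>

/*
 * Hypothetical stand-in for kvm_mmu_zap_page(): suppose zapping a page
 * also frees two unreachable child pages.
 */
#define CHILDREN_FREED 2

/* Old convention: return only the number of freed children. */
static int zap_page_old(void)
{
	return CHILDREN_FREED;
}

/* New convention: also count the zapped page itself ("Count self"). */
static int zap_page_new(void)
{
	return CHILDREN_FREED + 1;
}

int main(void)
{
	int used_pages;

	/* Old style: the caller must remember the extra used_pages--. */
	used_pages = 10;
	used_pages -= zap_page_old();
	used_pages--;
	printf("old convention: used_pages = %d\n", used_pages); /* 7 */

	/* New style: the return value already includes the page itself. */
	used_pages = 10;
	used_pages -= zap_page_new();
	printf("new convention: used_pages = %d\n", used_pages); /* 7 */

	return 0;
}

Both variants arrive at the same count, but the new one removes the easy-to-forget "used_pages--" and "+ 1" adjustments that the diff below deletes.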
parent 518c5a05e8
commit 54a4f0239f

1 changed file with 3 additions and 2 deletions
			
arch/x86/kvm/mmu.c
@@ -1504,6 +1504,8 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);
 	if (!sp->root_count) {
+		/* Count self */
+		ret++;
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else {
@@ -1540,7 +1542,6 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			used_pages -= kvm_mmu_zap_page(kvm, page);
-			used_pages--;
 		}
 		kvm_nr_mmu_pages = used_pages;
 		kvm->arch.n_free_mmu_pages = 0;
@@ -2941,7 +2942,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 
 	page = container_of(kvm->arch.active_mmu_pages.prev,
 			    struct kvm_mmu_page, link);
-	return kvm_mmu_zap_page(kvm, page) + 1;
+	return kvm_mmu_zap_page(kvm, page);
 }
 
 static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)