kvm: optimize GFN to memslot lookup with large slots amount
The current linear search doesn't scale well when
a large number of memslots is in use and the looked-up
slot is not near the beginning of the memslots array.
Taking into account that memslots don't overlap, it's
possible to switch the sorting order of the memslots
array from 'npages' to 'base_gfn' and use binary search
for memslot lookup by GFN.

As a result of switching to binary search, lookup times
are reduced when a large number of memslots is in use.
The following table shows search_memslots() cycles
measured during a WS2008R2 guest boot.

                                     boot,          boot + ~10 min
                                     mostly same    of using it,
                                     slot lookup    randomized lookup
                            max      average        average
                            cycles   cycles         cycles
13 slots,  linear search  : 1450     28             30
13 slots,  binary search  : 1400     30             40
117 slots, linear search  : 13000    30             460
117 slots, binary search  : 2000     35             180
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

commit 9c1a5d3878
parent 0e60b0799f
2 changed files with 27 additions and 11 deletions
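For illustration, here is a standalone user-space C sketch of the lookup
scheme the patch adopts; it is not the kernel code, and the slot layout,
names, and probe values are made up. The array is kept sorted by base_gfn
in descending order, so the binary search narrows down to the lowest index
whose base_gfn does not exceed the target GFN; since memslots don't
overlap, that is the only slot which can contain it.

/*
 * User-space sketch of a binary-search GFN lookup over slots sorted
 * by base_gfn in descending order (illustrative, not kernel code).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct slot {
	gfn_t base_gfn;
	unsigned long npages;
};

/* Sorted by base_gfn, largest first, the order the patch relies on. */
static struct slot slots[] = {
	{ .base_gfn = 0x100000, .npages = 0x1000 },
	{ .base_gfn = 0x1000,   .npages = 0x800  },
	{ .base_gfn = 0x0,      .npages = 0x100  },
};
static const int used_slots = 3;

static struct slot *lookup(gfn_t gfn)
{
	int start = 0, end = used_slots;

	/*
	 * Narrow down to the lowest index with base_gfn <= gfn; since
	 * slots don't overlap, it's the only one that can contain gfn.
	 */
	while (start < end) {
		int slot = start + (end - start) / 2;

		if (gfn >= slots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	/*
	 * Guard against gfn being below every base_gfn; the kernel can
	 * skip this because entries past used_slots exist and are empty.
	 */
	if (start < used_slots &&
	    gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];

	return NULL;	/* gfn falls into a hole between slots */
}

int main(void)
{
	gfn_t probes[] = { 0x0, 0x1234, 0x100800, 0x900 };

	for (int i = 0; i < 4; i++)
		printf("gfn 0x%llx -> %s\n", (unsigned long long)probes[i],
		       lookup(probes[i]) ? "hit" : "miss");
	return 0;
}

Built with gcc -std=c99, the probes print hits for 0x0, 0x1234 and
0x100800, and a miss for 0x900, which falls into the hole between the
two low slots.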
				
			
include/linux/kvm_host.h
@@ -354,6 +354,7 @@ struct kvm_memslots {
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
 	atomic_t lru_slot;
+	int used_slots;
 };
 
 struct kvm {
@@ -791,18 +792,27 @@ static inline void kvm_guest_exit(void)
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 {
+	int start = 0, end = slots->used_slots;
 	int slot = atomic_read(&slots->lru_slot);
-	struct kvm_memory_slot *memslot = &slots->memslots[slot];
+	struct kvm_memory_slot *memslots = slots->memslots;
+
+	if (gfn >= memslots[slot].base_gfn &&
+	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
+		return &memslots[slot];
 
-	if (gfn >= memslot->base_gfn &&
-	    gfn < memslot->base_gfn + memslot->npages)
-		return memslot;
+	while (start < end) {
+		slot = start + (end - start) / 2;
 
-	kvm_for_each_memslot(memslot, slots)
-		if (gfn >= memslot->base_gfn &&
-		      gfn < memslot->base_gfn + memslot->npages) {
-			atomic_set(&slots->lru_slot, memslot - slots->memslots);
-			return memslot;
-		}
+		if (gfn >= memslots[slot].base_gfn)
+			end = slot;
+		else
+			start = slot + 1;
+	}
+
+	if (gfn >= memslots[start].base_gfn &&
+	    gfn < memslots[start].base_gfn + memslots[start].npages) {
+		atomic_set(&slots->lru_slot, start);
+		return &memslots[start];
+	}
 
 	return NULL;
virt/kvm/kvm_main.c
@@ -679,8 +679,14 @@ static void update_memslots(struct kvm_memslots *slots,
 	struct kvm_memory_slot *mslots = slots->memslots;
 
 	WARN_ON(mslots[i].id != id);
-	if (!new->npages)
+	if (!new->npages) {
 		new->base_gfn = 0;
+		if (mslots[i].npages)
+			slots->used_slots--;
+	} else {
+		if (!mslots[i].npages)
+			slots->used_slots++;
+	}
 
 	while (i < KVM_MEM_SLOTS_NUM - 1 &&
 	       new->base_gfn <= mslots[i + 1].base_gfn) {
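To show why 'end = slots->used_slots' stays a valid upper bound, here is a
hedged user-space model of the accounting this hunk adds; the struct,
sizes, and values are invented, and the re-sorting the kernel performs
afterwards is omitted. The counter moves only on empty/non-empty
transitions, and a deleted slot has its base_gfn reset to 0, so it sorts
behind every live slot and indices [0, used_slots) keep exactly the live,
search-ordered entries.

/*
 * Illustrative model of the used_slots accounting added to
 * update_memslots(); not kernel code.
 */
#include <assert.h>
#include <stdio.h>

struct slot { unsigned long base_gfn, npages; };

struct slots {
	struct slot s[4];	/* sorted by base_gfn, descending */
	int used_slots;		/* live entries, all within s[0..used_slots) */
};

static void account(struct slots *sl, int i, struct slot *new)
{
	if (!new->npages) {		/* slot is being deleted */
		new->base_gfn = 0;	/* sorts behind every live slot */
		if (sl->s[i].npages)
			sl->used_slots--;	/* live -> empty */
	} else {
		if (!sl->s[i].npages)
			sl->used_slots++;	/* empty -> live */
	}
	sl->s[i] = *new;	/* install (the kernel also re-sorts) */
}

int main(void)
{
	struct slots sl = {
		.s = { { .base_gfn = 0x1000, .npages = 0x800 } },
		.used_slots = 1,
	};
	struct slot new = { .base_gfn = 0x100, .npages = 0x80 };

	account(&sl, 1, &new);		/* create into empty entry 1 */
	assert(sl.used_slots == 2);

	new.npages = 0;			/* delete it again */
	account(&sl, 1, &new);
	assert(sl.used_slots == 1);

	printf("%d live slot(s)\n", sl.used_slots);
	return 0;
}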