mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)

	x86/paravirt: Fix native_patch()
While chasing a regression I noticed we potentially patch the wrong
code in native_patch().
If we do not select the native code sequence, we must use the default
patcher, not fall through into the next switch case.
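
The underlying issue is plain C switch fall-through: when the
pv_is_native_*() check fails, execution continues into the next case
label and can patch the call site with the native sequence selected for
a different op. Below is a minimal standalone sketch of the control
flow (simplified, hypothetical names; not the kernel code itself),
contrasting the buggy fall-through with the labeled-default fix:

  #include <stdio.h>
  #include <stdbool.h>

  static bool native_a = false;	/* pretend the native check for op A fails */
  static bool native_b = true;

  static const char *patch_buggy(int type)
  {
  	switch (type) {
  	case 0:				/* op A */
  		if (native_a)
  			return "native A";
  		/* BUG: no break/goto; falls through into case 1 */
  	case 1:				/* op B */
  		if (native_b)
  			return "native B";
  	default:
  		return "default patch";
  	}
  }

  static const char *patch_fixed(int type)
  {
  	switch (type) {
  	case 0:
  		if (native_a)
  			return "native A";
  		goto patch_default;	/* FIX: use the default patcher instead */
  	case 1:
  		if (native_b)
  			return "native B";
  		goto patch_default;
  	default:
  patch_default:
  		return "default patch";
  	}
  }

  int main(void)
  {
  	printf("buggy: %s\n", patch_buggy(0));	/* prints "native B" */
  	printf("fixed: %s\n", patch_fixed(0));	/* prints "default patch" */
  	return 0;
  }

For op A (type 0) with its native check failing, the buggy version
returns the sequence selected for op B, while the fixed version reaches
the default patcher, mirroring the fix below.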
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel test robot <xiaolong.ye@intel.com>
Fixes: 3cded41794 ("x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()")
Link: http://lkml.kernel.org/r/20161208154349.270616999@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
			
			
This commit is contained in:

parent 6f38751510
commit 45dbea5f55

2 changed files with 8 additions and 0 deletions
arch/x86/kernel/paravirt_patch_32.c

@@ -58,15 +58,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 				end   = end_pv_lock_ops_queued_spin_unlock;
 				goto patch_site;
 			}
+			goto patch_default;
+
 		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
 			if (pv_is_native_vcpu_is_preempted()) {
 				start = start_pv_lock_ops_vcpu_is_preempted;
 				end   = end_pv_lock_ops_vcpu_is_preempted;
 				goto patch_site;
 			}
+			goto patch_default;
 #endif
 
 	default:
+patch_default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 
arch/x86/kernel/paravirt_patch_64.c

@@ -70,15 +70,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 				end   = end_pv_lock_ops_queued_spin_unlock;
 				goto patch_site;
 			}
+			goto patch_default;
+
 		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
 			if (pv_is_native_vcpu_is_preempted()) {
 				start = start_pv_lock_ops_vcpu_is_preempted;
 				end   = end_pv_lock_ops_vcpu_is_preempted;
 				goto patch_site;
 			}
+			goto patch_default;
 #endif
 
 	default:
+patch_default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 