xen/events: Mask a moving irq

Moving an unmasked irq may result in the irq handler being invoked on
both the source and the target CPU.

With the 2-level event channel ABI this can happen as follows, on the
source CPU:

        evtchn_2l_handle_events() ->
            generic_handle_irq() ->
                handle_edge_irq() ->
                   eoi_pirq():
                       irq_move_irq(data);
                       /***** WE ARE HERE *****/
                       if (VALID_EVTCHN(evtchn))
                           clear_evtchn(evtchn);

If at this moment the target CPU is handling an unrelated event in
evtchn_2l_handle_events()'s loop, it may pick up our event, since the
target's cpu_evtchn_mask claims that the event belongs to it *and* the
event is unmasked and still pending. At the same time, the source CPU
continues executing its own handle_edge_irq().

With FIFO-based event channels the scenario is similar: irq_move_irq()
may result in an EVTCHNOP_unmask hypercall which, in turn, may make
the event pending on the target CPU.

We can avoid this situation by moving and clearing the event while
keeping it masked, as sketched below.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
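
In code, the fix is the following sequence, applied identically in
eoi_pirq() and ack_dynirq(); the comments are explanatory annotations
added here, not part of the patch itself:

        if (unlikely(irqd_is_setaffinity_pending(data))) {
                /* Mask the event channel, remembering whether it
                 * was already masked before we touched it. */
                int masked = test_and_set_mask(evtchn);

                /* Ack while masked: the target CPU cannot pick
                 * the event up from its pending set here. */
                clear_evtchn(evtchn);

                /* Perform the deferred affinity change on the
                 * now-masked irq. */
                irq_move_masked_irq(data);

                /* Unmask only if we did the masking; a still
                 * pending event is then raised on the target CPU. */
                if (!masked)
                        unmask_evtchn(evtchn);
        } else
                /* No affinity change pending: just ack as before. */
                clear_evtchn(evtchn);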
			
			
commit ff1e22e7a6
parent 85d1a29de8
1 changed file with 24 additions and 4 deletions

diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 	int rc = 0;
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
 		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
+		clear_evtchn(evtchn);
 
 	if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
 		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
+		clear_evtchn(evtchn);
 }
 