Merge branch 'irq/for-x86' into irq/core

Get the infrastructure patches which are required for x86/apic into core

Author: Thomas Gleixner
Commit: f05218651b
8 changed files with 88 additions and 44 deletions
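At a glance, the series boils down to one pattern: the status word moves out of struct irq_data into a shared struct irq_common_data, and every status query or update goes through a single __irqd_to_state() accessor. A minimal, self-contained sketch of that pattern follows; the struct, macro, and helper names mirror the ones added in the hunks below, while the bit value and the small main() harness are purely illustrative and not part of the kernel change.

#include <stdio.h>

/* Mirrors the new shared-state container added in this series. */
struct irq_common_data {
	unsigned int		state_use_accessors;	/* only touched via accessors */
};

/* irq_data now points at the shared data instead of owning the state word. */
struct irq_data {
	unsigned int		irq;
	struct irq_common_data	*common;
};

/* All state accessors funnel through one macro, as in the diff below. */
#define __irqd_to_state(d)	((d)->common->state_use_accessors)

#define IRQD_IRQ_MASKED		(1 << 0)	/* illustrative bit value, not the kernel's */

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline int irqd_irq_masked(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}

int main(void)
{
	struct irq_common_data common = { 0 };
	struct irq_data data = { .irq = 1, .common = &common };

	/* Set and read back the state purely through the accessor. */
	irqd_set(&data, IRQD_IRQ_MASKED);
	printf("irq %u masked: %d\n", data.irq, !!irqd_irq_masked(&data));
	return 0;
}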
				
			
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -126,13 +126,21 @@ struct msi_desc;
 struct irq_domain;
 
 /**
- * struct irq_data - per irq and irq chip data passed down to chip functions
+ * struct irq_common_data - per irq data shared by all irqchips
+ * @state_use_accessors: status information for irq chip functions.
+ *			Use accessor functions to deal with it
+ */
+struct irq_common_data {
+	unsigned int		state_use_accessors;
+};
+
+/**
+ * struct irq_data - per irq chip data passed down to chip functions
  * @mask:		precomputed bitmask for accessing the chip registers
  * @irq:		interrupt number
  * @hwirq:		hardware interrupt number, local to the interrupt domain
  * @node:		node index useful for balancing
- * @state_use_accessors: status information for irq chip functions.
- *			Use accessor functions to deal with it
+ * @common:		point to data shared by all irqchips
  * @chip:		low level interrupt hardware access
  * @domain:		Interrupt translation domain; responsible for mapping
  *			between hwirq number and linux irq number.
@@ -153,7 +161,7 @@ struct irq_data {
 	unsigned int		irq;
 	unsigned long		hwirq;
 	unsigned int		node;
-	unsigned int		state_use_accessors;
+	struct irq_common_data	*common;
 	struct irq_chip		*chip;
 	struct irq_domain	*domain;
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -166,7 +174,7 @@ struct irq_data {
 };
 
 /*
- * Bit masks for irq_data.state
+ * Bit masks for irq_common_data.state_use_accessors
  *
  * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
@@ -198,34 +206,36 @@ enum {
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 };
 
+#define __irqd_to_state(d)		((d)->common->state_use_accessors)
+
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
 }
 
 static inline bool irqd_is_per_cpu(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_PER_CPU;
+	return __irqd_to_state(d) & IRQD_PER_CPU;
 }
 
 static inline bool irqd_can_balance(struct irq_data *d)
 {
-	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+	return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
 }
 
 static inline bool irqd_affinity_was_set(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_AFFINITY_SET;
+	return __irqd_to_state(d) & IRQD_AFFINITY_SET;
 }
 
 static inline void irqd_mark_affinity_was_set(struct irq_data *d)
 {
-	d->state_use_accessors |= IRQD_AFFINITY_SET;
+	__irqd_to_state(d) |= IRQD_AFFINITY_SET;
 }
 
 static inline u32 irqd_get_trigger_type(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_TRIGGER_MASK;
+	return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
 }
 
 /*
@@ -233,43 +243,43 @@ static inline u32 irqd_get_trigger_type(struct irq_data *d)
  */
 static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
 {
-	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
-	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+	__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
+	__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
 }
 
 static inline bool irqd_is_level_type(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_LEVEL;
+	return __irqd_to_state(d) & IRQD_LEVEL;
 }
 
 static inline bool irqd_is_wakeup_set(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_WAKEUP_STATE;
+	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
 }
 
 static inline bool irqd_can_move_in_process_context(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+	return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
 }
 
 static inline bool irqd_irq_disabled(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_IRQ_DISABLED;
+	return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
 }
 
 static inline bool irqd_irq_masked(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_IRQ_MASKED;
+	return __irqd_to_state(d) & IRQD_IRQ_MASKED;
 }
 
 static inline bool irqd_irq_inprogress(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+	return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
 }
 
 static inline bool irqd_is_wakeup_armed(struct irq_data *d)
 {
-	return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+	return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
 }
 
 
@@ -280,12 +290,12 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
  */
 static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
 {
-	d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+	__irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
 }
 
 static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
 {
-	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
+	__irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
 }
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -641,6 +651,23 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
 	return d ? irqd_get_trigger_type(d) : 0;
 }
 
+static inline int irq_data_get_node(struct irq_data *d)
+{
+	return d->node;
+}
+
+static inline struct cpumask *irq_get_affinity_mask(int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	return d ? d->affinity : NULL;
+}
+
+static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+	return d->affinity;
+}
+
 unsigned int arch_dynirq_lower_bound(unsigned int from);
 
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -17,7 +17,7 @@ struct pt_regs;
 
 /**
  * struct irq_desc - interrupt descriptor
- * @irq_data:		per irq and chip data passed down to chip functions
+ * @irq_common_data:	per irq and chip data passed down to chip functions
  * @kstat_irqs:		irq stats per cpu
  * @handle_irq:		highlevel irq-events handler
  * @preflow_handler:	handler called before the flow handler (currently used by sparc)
@@ -47,6 +47,7 @@ struct pt_regs;
  * @name:		flow handler name for /proc/interrupts output
  */
 struct irq_desc {
+	struct irq_common_data	irq_common_data;
 	struct irq_data		irq_data;
 	unsigned int __percpu	*kstat_irqs;
 	irq_flow_handler_t	handle_irq;
@@ -93,6 +94,15 @@ struct irq_desc {
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
+static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+	return irq_to_desc(data->irq);
+#else
+	return container_of(data, struct irq_desc, irq_data);
+#endif
+}
+
 static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
 {
 	return &desc->irq_data;
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -59,8 +59,6 @@ enum {
 #include "debug.h"
 #include "settings.h"
 
-#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
-
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
@@ -170,27 +168,27 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
  */
 static inline void irqd_set_move_pending(struct irq_data *d)
 {
-	d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
+	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
 }
 
 static inline void irqd_clr_move_pending(struct irq_data *d)
 {
-	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
+	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
 }
 
 static inline void irqd_clear(struct irq_data *d, unsigned int mask)
 {
-	d->state_use_accessors &= ~mask;
+	__irqd_to_state(d) &= ~mask;
 }
 
 static inline void irqd_set(struct irq_data *d, unsigned int mask)
 {
-	d->state_use_accessors |= mask;
+	__irqd_to_state(d) |= mask;
 }
 
 static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
 {
-	return d->state_use_accessors & mask;
+	return __irqd_to_state(d) & mask;
 }
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
@@ -199,6 +197,11 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
 	__this_cpu_inc(kstat.irqs_sum);
 }
 
+static inline int irq_desc_get_node(struct irq_desc *desc)
+{
+	return irq_data_get_node(&desc->irq_data);
+}
+
 #ifdef CONFIG_PM_SLEEP
 bool irq_pm_check_wakeup(struct irq_desc *desc);
 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -59,16 +59,10 @@ static void desc_smp_init(struct irq_desc *desc, int node)
 #endif
 }
 
-static inline int desc_node(struct irq_desc *desc)
-{
-	return desc->irq_data.node;
-}
-
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
 static inline void desc_smp_init(struct irq_desc *desc, int node) { }
-static inline int desc_node(struct irq_desc *desc) { return 0; }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
@@ -76,6 +70,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 {
 	int cpu;
 
+	desc->irq_data.common = &desc->irq_common_data;
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
@@ -299,7 +294,7 @@ static void free_desc(unsigned int irq)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, desc_node(desc), NULL);
+	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -830,10 +830,12 @@ static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
 {
 	struct irq_data *irq_data;
 
-	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node);
+	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
+				irq_data_get_node(child));
 	if (irq_data) {
 		child->parent_data = irq_data;
 		irq_data->irq = child->irq;
+		irq_data->common = child->common;
 		irq_data->node = child->node;
 		irq_data->domain = domain;
 	}
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -363,7 +363,7 @@ static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	struct cpumask *set = irq_default_affinity;
-	int node = desc->irq_data.node;
+	int node = irq_desc_get_node(desc);
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -7,21 +7,21 @@
 void irq_move_masked_irq(struct irq_data *idata)
 {
 	struct irq_desc *desc = irq_data_to_desc(idata);
-	struct irq_chip *chip = idata->chip;
+	struct irq_chip *chip = desc->irq_data.chip;
 
 	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
 		return;
 
+	irqd_clr_move_pending(&desc->irq_data);
+
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (!irqd_can_balance(&desc->irq_data)) {
+	if (irqd_is_per_cpu(&desc->irq_data)) {
 		WARN_ON(1);
 		return;
 	}
 
-	irqd_clr_move_pending(&desc->irq_data);
-
 	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
@@ -52,6 +52,13 @@ void irq_move_irq(struct irq_data *idata)
 {
 	bool masked;
 
+	/*
+	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
+	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
+	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
+	 */
+	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
+
 	if (likely(!irqd_is_setaffinity_pending(idata)))
 		return;
 
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -241,7 +241,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long) m->private);
 
-	seq_printf(m, "%d\n", desc->irq_data.node);
+	seq_printf(m, "%d\n", irq_desc_get_node(desc));
 	return 0;
 }
 
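A note on the irq_data_to_desc() change above: with the helper moved next to the struct irq_desc definition, it can either walk back from an embedded irq_data via container_of() or, when hierarchical irq domains may hand in a nested irq_data, look the descriptor up by irq number. The sketch below illustrates only the container_of() half of that choice, using stand-in types and a stand-in macro rather than the kernel's own; it is not part of the commit.

#include <stdio.h>
#include <stddef.h>

/* Stand-in container_of(), matching the shape of the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal stand-ins for struct irq_data / struct irq_desc. */
struct irq_data {
	unsigned int	irq;
};

struct irq_desc {
	unsigned int	depth;		/* some descriptor-only field, illustrative */
	struct irq_data	irq_data;	/* embedded, as in the !HIERARCHY case */
};

/* Mirrors the !CONFIG_IRQ_DOMAIN_HIERARCHY branch of irq_data_to_desc(). */
static struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
	return container_of(data, struct irq_desc, irq_data);
}

int main(void)
{
	struct irq_desc desc = { .depth = 1, .irq_data = { .irq = 42 } };
	struct irq_data *d = &desc.irq_data;

	/* Recovers the enclosing descriptor from the embedded irq_data. */
	printf("desc %p, recovered %p, irq %u\n",
	       (void *)&desc, (void *)irq_data_to_desc(d), d->irq);
	return 0;
}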