	irq: Fix typos in comments
Fix ~36 single-word typos in the IRQ, irqchip and irqdomain code comments.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c93a5e20c3
commit a359f75796

23 changed files with 36 additions and 36 deletions
drivers/irqchip/irq-aspeed-vic.c
@@ -71,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic)
 	writel(0, vic->base + AVIC_INT_SELECT);
 	writel(0, vic->base + AVIC_INT_SELECT + 4);
 
-	/* Some interrupts have a programable high/low level trigger
+	/* Some interrupts have a programmable high/low level trigger
 	 * (4 GPIO direct inputs), for now we assume this was configured
 	 * by firmware. We read which ones are edge now.
 	 */
@@ -203,7 +203,7 @@ static int __init avic_of_init(struct device_node *node,
 	}
 	vic->base = regs;
 
-	/* Initialize soures, all masked */
+	/* Initialize sources, all masked */
 	vic_init_hw(vic);
 
 	/* Ready to receive interrupts */
drivers/irqchip/irq-bcm7120-l2.c
@@ -309,7 +309,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 
 		if (data->can_wake) {
 			/* This IRQ chip can wake the system, set all
-			 * relevant child interupts in wake_enabled mask
+			 * relevant child interrupts in wake_enabled mask
 			 */
 			gc->wake_enabled = 0xffffffff;
 			gc->wake_enabled &= ~gc->unused;
drivers/irqchip/irq-csky-apb-intc.c
@@ -176,7 +176,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
 	writel(0x0, reg_base + GX_INTC_NEN63_32);
 
 	/*
-	 * Initial mask reg with all unmasked, because we only use enalbe reg
+	 * Initial mask reg with all unmasked, because we only use enable reg
 	 */
 	writel(0x0, reg_base + GX_INTC_NMASK31_00);
 	writel(0x0, reg_base + GX_INTC_NMASK63_32);
drivers/irqchip/irq-gic-v2m.c
@@ -371,7 +371,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
 	 * the MSI data is the absolute value within the range from
 	 * spi_start to (spi_start + num_spis).
 	 *
-	 * Broadom NS2 GICv2m implementation has an erratum where the MSI data
+	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
 	 * is 'spi_number - 32'
 	 *
 	 * Reading that register fails on the Graviton implementation
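Aside: the encoding this comment describes is simple enough to sketch. The helper below is illustrative only — the function name and quirk flag are hypothetical, not the driver's actual code:

	/*
	 * Illustrative sketch: normally the MSI data is the absolute SPI
	 * number within [spi_start, spi_start + num_spis); the Broadcom
	 * NS2 erratum makes it 'spi_number - 32' instead.
	 */
	static u32 demo_msi_data_for_spi(u32 spi, bool ns2_erratum)
	{
		return ns2_erratum ? spi - 32 : spi;
	}

For example, SPI 96 would encode as 96 on a compliant GICv2m, but as 64 on NS2.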
drivers/irqchip/irq-gic-v3-its.c
@@ -1492,7 +1492,7 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
 	 *
 	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
 	 * value or to 1023, depending on the enable bit. But that
-	 * would be issueing a mapping for an /existing/ DevID+EventID
+	 * would be issuing a mapping for an /existing/ DevID+EventID
 	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
 	 * to the /same/ vPE, using this opportunity to adjust the
 	 * doorbell. Mouahahahaha. We loves it, Precious.
@@ -3122,7 +3122,7 @@ static void its_cpu_init_lpis(void)
 
 		/*
 		 * It's possible for CPU to receive VLPIs before it is
-		 * sheduled as a vPE, especially for the first CPU, and the
+		 * scheduled as a vPE, especially for the first CPU, and the
 		 * VLPI with INTID larger than 2^(IDbits+1) will be considered
 		 * as out of range and dropped by GIC.
 		 * So we initialize IDbits to known value to avoid VLPI drop.
@@ -3616,7 +3616,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 
 	/*
 	 * If all interrupts have been freed, start mopping the
-	 * floor. This is conditionned on the device not being shared.
+	 * floor. This is conditioned on the device not being shared.
 	 */
 	if (!its_dev->shared &&
 	    bitmap_empty(its_dev->event_map.lpi_map,
@@ -4194,7 +4194,7 @@ static int its_sgi_set_affinity(struct irq_data *d,
 {
 	/*
 	 * There is no notion of affinity for virtual SGIs, at least
-	 * not on the host (since they can only be targetting a vPE).
+	 * not on the host (since they can only be targeting a vPE).
 	 * Tell the kernel we've done whatever it asked for.
 	 */
 	irq_data_update_effective_affinity(d, mask_val);
@@ -4239,7 +4239,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d,
 	/*
 	 * Locking galore! We can race against two different events:
 	 *
-	 * - Concurent vPE affinity change: we must make sure it cannot
+	 * - Concurrent vPE affinity change: we must make sure it cannot
 	 *   happen, or we'll talk to the wrong redistributor. This is
 	 *   identical to what happens with vLPIs.
 	 *
drivers/irqchip/irq-gic-v3.c
@@ -1379,7 +1379,7 @@ static int gic_irq_domain_translate(struct irq_domain *d,
 
 		/*
 		 * Make it clear that broken DTs are... broken.
-		 * Partitionned PPIs are an unfortunate exception.
+		 * Partitioned PPIs are an unfortunate exception.
 		 */
 		WARN_ON(*type == IRQ_TYPE_NONE &&
 			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
drivers/irqchip/irq-loongson-pch-pic.c
@@ -163,7 +163,7 @@ static void pch_pic_reset(struct pch_pic *priv)
 	int i;
 
 	for (i = 0; i < PIC_COUNT; i++) {
-		/* Write vectore ID */
+		/* Write vectored ID */
 		writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
 		/* Hardcode route to HT0 Lo */
 		writeb(1, priv->base + PCH_INT_ROUTE(i));
drivers/irqchip/irq-meson-gpio.c
@@ -227,7 +227,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
 
 	/*
 	 * Get the hwirq number assigned to this channel through
-	 * a pointer the channel_irq table. The added benifit of this
+	 * a pointer the channel_irq table. The added benefit of this
 	 * method is that we can also retrieve the channel index with
 	 * it, using the table base.
 	 */
drivers/irqchip/irq-mtk-cirq.c
@@ -217,7 +217,7 @@ static void mtk_cirq_resume(void)
 {
 	u32 value;
 
-	/* flush recored interrupts, will send signals to parent controller */
+	/* flush recorded interrupts, will send signals to parent controller */
 	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
 	writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
 
drivers/irqchip/irq-mxs.c
@@ -58,7 +58,7 @@ struct icoll_priv {
 static struct icoll_priv icoll_priv;
 static struct irq_domain *icoll_domain;
 
-/* calculate bit offset depending on number of intterupt per register */
+/* calculate bit offset depending on number of interrupt per register */
 static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
 {
 	/*
@@ -68,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
 	return bit << ((d->hwirq & 3) << 3);
 }
 
-/* calculate mem offset depending on number of intterupt per register */
+/* calculate mem offset depending on number of interrupt per register */
 static void __iomem *icoll_intr_reg(struct irq_data *d)
 {
 	/* offset = hwirq / intr_per_reg * 0x10 */
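Aside: the offset math in these two helpers deserves a worked example (the numbers are hypothetical, not from the driver). With four interrupts per register, each interrupt owns one byte of the register, and register banks sit 0x10 apart:

	/* Illustrative sketch for hwirq 6, four interrupts per register */
	u32 bit     = 1;                      /* e.g. an enable bit */
	u32 shifted = bit << ((6 & 3) << 3);  /* (6 & 3) = 2, so shift by
	                                       * 16 bits: 0x00010000 */
	u32 reg_off = (6 / 4) * 0x10;         /* 0x10: the second register */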
drivers/irqchip/irq-sun4i.c
@@ -189,7 +189,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
 	 * 3) spurious irq
 	 * So if we immediately get a reading of 0, check the irq-pending reg
 	 * to differentiate between 2 and 3. We only do this once to avoid
-	 * the extra check in the common case of 1 hapening after having
+	 * the extra check in the common case of 1 happening after having
 	 * read the vector-reg once.
 	 */
 	hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
drivers/irqchip/irq-ti-sci-inta.c
@@ -78,7 +78,7 @@ struct ti_sci_inta_vint_desc {
  * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
  *				   Interrupt Aggregator IRQ domain.
  * @sci:		Pointer to TISCI handle
- * @vint:		TISCI resource pointer representing IA inerrupts.
+ * @vint:		TISCI resource pointer representing IA interrupts.
  * @global_event:	TISCI resource pointer representing global events.
  * @vint_list:		List of the vints active in the system
  * @vint_mutex:		Mutex to protect vint_list
drivers/irqchip/irq-vic.c
@@ -163,7 +163,7 @@ static struct syscore_ops vic_syscore_ops = {
 };
 
 /**
- * vic_pm_init - initicall to register VIC pm
+ * vic_pm_init - initcall to register VIC pm
 *
 * This is called via late_initcall() to register
 * the resources for the VICs due to the early
@@ -397,7 +397,7 @@ static void __init vic_clear_interrupts(void __iomem *base)
 /*
 * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
 * The original cell has 32 interrupts, while the modified one has 64,
- * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
 * the probe function is called twice, with base set to offset 000
 *  and 020 within the page. We call this "second block".
 */
drivers/irqchip/irq-xilinx-intc.c
@@ -210,7 +210,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
 
 	/*
 	 * Disable all external interrupts until they are
-	 * explicity requested.
+	 * explicitly requested.
 	 */
 	xintc_write(irqc, IER, 0);
 
include/linux/irq.h
@@ -116,7 +116,7 @@ enum {
 * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_common_data.affinity
 * IRQ_SET_MASK_OK_DONE	- Same as IRQ_SET_MASK_OK for core. Special code to
 *			  support stacked irqchips, which indicates skipping
- *			  all descendent irqchips.
+ *			  all descendant irqchips.
 */
enum {
	IRQ_SET_MASK_OK = 0,
@@ -302,7 +302,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
 
 /*
 * Must only be called of irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
 */
static inline void irqd_set_single_target(struct irq_data *d)
{
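Aside: a minimal sketch of how an irq_set_affinity() callback might use these return codes; the chip below is made up for illustration, and real stacked chips are more involved:

	static int demo_chip_set_affinity(struct irq_data *d,
					  const struct cpumask *mask,
					  bool force)
	{
		/* ... program the hardware for 'mask' here ... */

		irq_data_update_effective_affinity(d, mask);

		/*
		 * Same as IRQ_SET_MASK_OK for the core, but tells any
		 * stacked descendant irqchips to skip their own processing.
		 */
		return IRQ_SET_MASK_OK_DONE;
	}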
include/linux/irqdesc.h
@@ -32,7 +32,7 @@ struct pt_regs;
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @threads_handled:	stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
 * @lock:		locking for SMP
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @affinity_notify:	context for notification of affinity changes
kernel/irq/chip.c
@@ -808,7 +808,7 @@ void handle_edge_irq(struct irq_desc *desc)
 		/*
 		 * When another irq arrived while we were handling
 		 * one, we could have masked the irq.
-		 * Renable it, if it was not disabled in meantime.
+		 * Reenable it, if it was not disabled in meantime.
 		 */
 		if (unlikely(desc->istate & IRQS_PENDING)) {
 			if (!irqd_irq_disabled(&desc->irq_data) &&
kernel/irq/dummychip.c
@@ -13,7 +13,7 @@
 
 /*
 * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
+ * Each architecture has to answer this themselves.
 */
static void ack_bad(struct irq_data *data)
{
kernel/irq/irqdesc.c
@@ -31,7 +31,7 @@ static int __init irq_affinity_setup(char *str)
 	cpulist_parse(str, irq_default_affinity);
 	/*
 	 * Set at least the boot cpu. We don't want to end up with
-	 * bugreports caused by random comandline masks
+	 * bugreports caused by random commandline masks
 	 */
 	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 	return 1;
kernel/irq/irqdomain.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
 * @name:	Optional user provided domain name
 * @pa:		Optional user-provided physical address
 *
- * Allocate a struct irqchip_fwid, and return a poiner to the embedded
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 *
 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
@@ -665,7 +665,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
 
 	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
-	/* Look for default domain if nececssary */
+	/* Look for default domain if necessary */
 	if (domain == NULL)
 		domain = irq_default_domain;
 	if (domain == NULL) {
@@ -906,7 +906,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
{
	struct irq_data *data;
 
-	/* Look for default domain if nececssary */
+	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
@@ -1436,7 +1436,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptor and required hardware resources. The second step,
- * irq_domain_activate_irq(), is to program hardwares with preallocated
+ * irq_domain_activate_irq(), is to program the hardware with preallocated
 * resources. In this way, it's easier to rollback when failing to
 * allocate resources.
 */
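Aside: the two-step split the last comment describes can be sketched roughly as below. This mirrors core-internal flow (irq_domain_activate_irq() is not a driver-facing API, and in practice the core drives activation on startup), so treat it as an illustration of the idea, with the wrapper function invented here:

	static int demo_alloc_then_activate(struct irq_domain *domain,
					    unsigned int nr_irqs, int node,
					    void *arg)
	{
		int virq, ret;

		/* Step 1: allocate IRQ descriptors + hardware resources */
		virq = __irq_domain_alloc_irqs(domain, -1, nr_irqs, node,
					       arg, false, NULL);
		if (virq < 0)
			return virq;	/* nothing was programmed yet */

		/* Step 2: program the hardware with what was preallocated */
		ret = irq_domain_activate_irq(irq_domain_get_irq_data(domain, virq),
					      false);
		if (ret) {
			irq_domain_free_irqs(virq, nr_irqs);	/* easy rollback */
			return ret;
		}
		return virq;
	}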
kernel/irq/manage.c
@@ -326,7 +326,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
 	 * If the interrupt is not yet activated, just store the affinity
 	 * mask and do not call the chip driver at all. On activation the
 	 * driver has to make sure anyway that the interrupt is in a
-	 * useable state so startup works.
+	 * usable state so startup works.
 	 */
 	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
 	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
@@ -1054,7 +1054,7 @@ static void irq_finalize_oneshot(struct irq_desc *desc,
 	 * to IRQS_INPROGRESS and the irq line is masked forever.
 	 *
 	 * This also serializes the state of shared oneshot handlers
-	 * versus "desc->threads_onehsot |= action->thread_mask;" in
+	 * versus "desc->threads_oneshot |= action->thread_mask;" in
 	 * irq_wake_thread(). See the comment there which explains the
 	 * serialization.
 	 */
@@ -1909,7 +1909,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 	/* Last action releases resources */
 	if (!desc->action) {
 		/*
-		 * Reaquire bus lock as irq_release_resources() might
+		 * Reacquire bus lock as irq_release_resources() might
 		 * require it to deallocate resources over the slow bus.
 		 */
 		chip_bus_lock(desc);
kernel/irq/msi.c
@@ -5,7 +5,7 @@
 *
 * This file is licensed under GPLv2.
 *
- * This file contains common code to support Message Signalled Interrupt for
+ * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
kernel/irq/timings.c
@@ -485,7 +485,7 @@ static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
 
 	/*
 	 * The interrupt triggered more than one second apart, that
-	 * ends the sequence as predictible for our purpose. In this
+	 * ends the sequence as predictable for our purpose. In this
 	 * case, assume we have the beginning of a sequence and the
 	 * timestamp is the first value. As it is impossible to
 	 * predict anything at this point, return.