Mirror of https://github.com/torvalds/linux.git (synced 2025-10-31)
genirq/irqdesc: Switch to lock guards

Replace all lock/unlock pairs with lock guards and simplify the code flow.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jiri Slaby <jirislaby@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/871ptaqhoo.ffs@tglx

commit 5d964a9f7c, parent 0f70a49f3f
1 changed file with 44 additions and 85 deletions
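For readers who have not yet used the guard() infrastructure from <linux/cleanup.h> that this patch builds on, here is a minimal sketch of the pattern. The demo_state structure and demo_read() are hypothetical and only for illustration; guard(raw_spinlock_irq) itself comes from the kernel's spinlock/cleanup headers.

#include <linux/spinlock.h>
#include <linux/cleanup.h>
#include <linux/errno.h>

/* Hypothetical lock-protected state, for illustration only. */
struct demo_state {
	raw_spinlock_t	lock;
	int		value;
};

static int demo_read(struct demo_state *s)
{
	/*
	 * guard(raw_spinlock_irq) acquires s->lock with interrupts disabled
	 * and releases it automatically when the scope is left, on either
	 * return path below. No explicit unlock call is needed.
	 */
	guard(raw_spinlock_irq)(&s->lock);

	if (!s->value)
		return -ENODATA;
	return s->value;
}

That automatic release on every exit path is what lets the converted functions in the diff below return directly from inside the critical section.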
				
			
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -246,8 +246,7 @@ static struct kobject *irq_kobj_base;
 #define IRQ_ATTR_RO(_name) \
 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 
-static ssize_t per_cpu_count_show(struct kobject *kobj,
-				  struct kobj_attribute *attr, char *buf)
+static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
 	ssize_t ret = 0;
@@ -266,99 +265,75 @@ static ssize_t per_cpu_count_show(struct kobject *kobj,
 }
 IRQ_ATTR_RO(per_cpu_count);
 
-static ssize_t chip_name_show(struct kobject *kobj,
-			      struct kobj_attribute *attr, char *buf)
+static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
-	ssize_t ret = 0;
 
-	raw_spin_lock_irq(&desc->lock);
+	guard(raw_spinlock_irq)(&desc->lock);
 	if (desc->irq_data.chip && desc->irq_data.chip->name)
-		ret = sysfs_emit(buf, "%s\n", desc->irq_data.chip->name);
-	raw_spin_unlock_irq(&desc->lock);
-
-	return ret;
+		return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name);
+	return 0;
 }
 IRQ_ATTR_RO(chip_name);
 
-static ssize_t hwirq_show(struct kobject *kobj,
-			  struct kobj_attribute *attr, char *buf)
+static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
-	ssize_t ret = 0;
 
+	guard(raw_spinlock_irq)(&desc->lock);
 	raw_spin_lock_irq(&desc->lock);
 	if (desc->irq_data.domain)
-		ret = sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq);
-	raw_spin_unlock_irq(&desc->lock);
-
-	return ret;
+		return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq);
+	return 0;
 }
 IRQ_ATTR_RO(hwirq);
 
-static ssize_t type_show(struct kobject *kobj,
-			 struct kobj_attribute *attr, char *buf)
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
-	ssize_t ret = 0;
 
-	raw_spin_lock_irq(&desc->lock);
-	ret = sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
-	raw_spin_unlock_irq(&desc->lock);
-
-	return ret;
+	guard(raw_spinlock_irq)(&desc->lock);
+	return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
 
 }
 IRQ_ATTR_RO(type);
 
-static ssize_t wakeup_show(struct kobject *kobj,
-			   struct kobj_attribute *attr, char *buf)
+static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
-	ssize_t ret = 0;
-
-	raw_spin_lock_irq(&desc->lock);
-	ret = sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data)));
-	raw_spin_unlock_irq(&desc->lock);
-
-	return ret;
 
+	guard(raw_spinlock_irq)(&desc->lock);
+	return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data)));
 }
 IRQ_ATTR_RO(wakeup);
 
-static ssize_t name_show(struct kobject *kobj,
-			 struct kobj_attribute *attr, char *buf)
+static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
-	ssize_t ret = 0;
 
-	raw_spin_lock_irq(&desc->lock);
+	guard(raw_spinlock_irq)(&desc->lock);
 	if (desc->name)
-		ret = sysfs_emit(buf, "%s\n", desc->name);
-	raw_spin_unlock_irq(&desc->lock);
-
-	return ret;
+		return sysfs_emit(buf, "%s\n", desc->name);
+	return 0;
 }
 IRQ_ATTR_RO(name);
 
-static ssize_t actions_show(struct kobject *kobj,
-			    struct kobj_attribute *attr, char *buf)
+static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
 	struct irqaction *action;
 	ssize_t ret = 0;
 	char *p = "";
 
-	raw_spin_lock_irq(&desc->lock);
-	for_each_action_of_desc(desc, action) {
-		ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name);
-		p = ",";
+	scoped_guard(raw_spinlock_irq, &desc->lock) {
+		for_each_action_of_desc(desc, action) {
+			ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name);
+			p = ",";
+		}
 	}
-	raw_spin_unlock_irq(&desc->lock);
 
 	if (ret)
 		ret += sysfs_emit_at(buf, ret, "\n");
-
 	return ret;
 }
 IRQ_ATTR_RO(actions);
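actions_show() above still has work to do after the critical section, so it uses scoped_guard(), which confines the lock to the attached block instead of the whole function. A small sketch along the same lines; demo_counter and demo_format() are hypothetical.

#include <linux/spinlock.h>
#include <linux/sysfs.h>

/* Hypothetical lock-protected counter, for illustration only. */
struct demo_counter {
	raw_spinlock_t	lock;
	unsigned int	count;
};

static ssize_t demo_format(struct demo_counter *c, char *buf)
{
	ssize_t ret = 0;

	/* The lock is held only for the duration of this block. */
	scoped_guard(raw_spinlock_irq, &c->lock) {
		ret = sysfs_emit_at(buf, ret, "%u", c->count);
	}

	/* The trailing newline is appended with the lock already dropped. */
	if (ret)
		ret += sysfs_emit_at(buf, ret, "\n");
	return ret;
}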
@@ -414,19 +389,14 @@ static int __init irq_sysfs_init(void)
 	int irq;
 
 	/* Prevent concurrent irq alloc/free */
-	irq_lock_sparse();
-
+	guard(mutex)(&sparse_irq_lock);
 	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
-	if (!irq_kobj_base) {
-		irq_unlock_sparse();
+	if (!irq_kobj_base)
 		return -ENOMEM;
-	}
 
 	/* Add the already allocated interrupts */
 	for_each_irq_desc(irq, desc)
 		irq_sysfs_add(irq, desc);
-	irq_unlock_sparse();
-
 	return 0;
 }
 postcore_initcall(irq_sysfs_init);
@@ -569,12 +539,12 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 	return -ENOMEM;
 }
 
-static int irq_expand_nr_irqs(unsigned int nr)
+static bool irq_expand_nr_irqs(unsigned int nr)
 {
 	if (nr > MAX_SPARSE_IRQS)
-		return -ENOMEM;
+		return false;
 	nr_irqs = nr;
-	return 0;
+	return true;
 }
 
 int __init early_irq_init(void)
@@ -652,11 +622,9 @@ EXPORT_SYMBOL(irq_to_desc);
 static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	scoped_guard(raw_spinlock_irqsave, &desc->lock)
+		desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
 	delete_irq_desc(irq);
 }
 
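free_desc() above uses the irqsave flavor. The saved interrupt flags are kept inside the guard object itself, which is why the patch can also drop the `unsigned long flags` local. A hypothetical sketch:

#include <linux/spinlock.h>

/* Hypothetical descriptor, for illustration only. */
struct demo_desc {
	raw_spinlock_t	lock;
	unsigned int	state;
};

static void demo_reset(struct demo_desc *d)
{
	/*
	 * Equivalent to a raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore()
	 * pair: the saved flags live in the guard, not in a local variable,
	 * and are restored once the guarded statement completes.
	 */
	scoped_guard(raw_spinlock_irqsave, &d->lock)
		d->state = 0;
}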
@@ -675,16 +643,15 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
 	return start;
 }
 
-static int irq_expand_nr_irqs(unsigned int nr)
+static inline bool irq_expand_nr_irqs(unsigned int nr)
 {
-	return -ENOMEM;
+	return false;
 }
 
 void irq_mark_irq(unsigned int irq)
 {
-	mutex_lock(&sparse_irq_lock);
+	guard(mutex)(&sparse_irq_lock);
 	irq_insert_desc(irq, irq_desc + irq);
-	mutex_unlock(&sparse_irq_lock);
 }
 
 #ifdef CONFIG_GENERIC_IRQ_LEGACY
@@ -823,11 +790,9 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
 	if (from >= nr_irqs || (from + cnt) > nr_irqs)
 		return;
 
-	mutex_lock(&sparse_irq_lock);
+	guard(mutex)(&sparse_irq_lock);
 	for (i = 0; i < cnt; i++)
 		free_desc(from + i);
-
-	mutex_unlock(&sparse_irq_lock);
 }
 EXPORT_SYMBOL_GPL(irq_free_descs);
 
@@ -844,11 +809,10 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
  *
  * Returns the first irq number or error code
  */
-int __ref
-__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-		  struct module *owner, const struct irq_affinity_desc *affinity)
+int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+			    struct module *owner, const struct irq_affinity_desc *affinity)
 {
-	int start, ret;
+	int start;
 
 	if (!cnt)
 		return -EINVAL;
@@ -866,22 +830,17 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		from = arch_dynirq_lower_bound(from);
 	}
 
-	mutex_lock(&sparse_irq_lock);
+	guard(mutex)(&sparse_irq_lock);
 
 	start = irq_find_free_area(from, cnt);
-	ret = -EEXIST;
 	if (irq >=0 && start != irq)
-		goto unlock;
+		return -EEXIST;
 
 	if (start + cnt > nr_irqs) {
-		ret = irq_expand_nr_irqs(start + cnt);
-		if (ret)
-			goto unlock;
+		if (!irq_expand_nr_irqs(start + cnt))
+			return -ENOMEM;
 	}
-	ret = alloc_descs(start, cnt, node, affinity, owner);
-unlock:
-	mutex_unlock(&sparse_irq_lock);
-	return ret;
+	return alloc_descs(start, cnt, node, affinity, owner);
 }
 EXPORT_SYMBOL_GPL(__irq_alloc_descs);
 
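The __irq_alloc_descs() hunk shows the main payoff for functions with several error exits: once guard(mutex) owns the unlock, the `goto unlock` bookkeeping collapses into direct returns. A before/after sketch; demo_lock, demo_do_alloc() and both wrappers are hypothetical.

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_lock);
static int demo_next_id;	/* hypothetical state protected by demo_lock */

/* Hypothetical allocation step shared by both variants below. */
static int demo_do_alloc(void)
{
	return demo_next_id++;
}

/* Before: every failure path has to reach the unlock label. */
static int demo_alloc_unguarded(bool ok)
{
	int ret;

	mutex_lock(&demo_lock);
	if (!ok) {
		ret = -ENOMEM;
		goto unlock;
	}
	ret = demo_do_alloc();
unlock:
	mutex_unlock(&demo_lock);
	return ret;
}

/* After: guard(mutex) drops the lock on every return path. */
static int demo_alloc_guarded(bool ok)
{
	guard(mutex)(&demo_lock);

	if (!ok)
		return -ENOMEM;
	return demo_do_alloc();
}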