mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 08:38:45 +02:00 
			
		
		
		
	genirq/irqdesc: Switch to lock guards
Replace all lock/unlock pairs with lock guards and simplify the code flow.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jiri Slaby <jirislaby@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/871ptaqhoo.ffs@tglx
This commit is contained in:
		
							parent
							
								
									0f70a49f3f
								
							
						
					
					
						commit
						5d964a9f7c
					
				
					 1 changed file with 44 additions and 85 deletions
				
			
		|  | @ -246,8 +246,7 @@ static struct kobject *irq_kobj_base; | |||
| #define IRQ_ATTR_RO(_name) \ | ||||
| static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | ||||
| 
 | ||||
| static ssize_t per_cpu_count_show(struct kobject *kobj, | ||||
| 				  struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	ssize_t ret = 0; | ||||
|  | @ -266,99 +265,75 @@ static ssize_t per_cpu_count_show(struct kobject *kobj, | |||
| } | ||||
| IRQ_ATTR_RO(per_cpu_count); | ||||
| 
 | ||||
| static ssize_t chip_name_show(struct kobject *kobj, | ||||
| 			      struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	ssize_t ret = 0; | ||||
| 
 | ||||
| 	raw_spin_lock_irq(&desc->lock); | ||||
| 	guard(raw_spinlock_irq)(&desc->lock); | ||||
| 	if (desc->irq_data.chip && desc->irq_data.chip->name) | ||||
| 		ret = sysfs_emit(buf, "%s\n", desc->irq_data.chip->name); | ||||
| 	raw_spin_unlock_irq(&desc->lock); | ||||
| 
 | ||||
| 	return ret; | ||||
| 		return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name); | ||||
| 	return 0; | ||||
| } | ||||
| IRQ_ATTR_RO(chip_name); | ||||
| 
 | ||||
| static ssize_t hwirq_show(struct kobject *kobj, | ||||
| 			  struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	ssize_t ret = 0; | ||||
| 
 | ||||
| 	guard(raw_spinlock_irq)(&desc->lock); | ||||
| 	raw_spin_lock_irq(&desc->lock); | ||||
| 	if (desc->irq_data.domain) | ||||
| 		ret = sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq); | ||||
| 	raw_spin_unlock_irq(&desc->lock); | ||||
| 
 | ||||
| 	return ret; | ||||
| 		return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq); | ||||
| 	return 0; | ||||
| } | ||||
| IRQ_ATTR_RO(hwirq); | ||||
| 
 | ||||
| static ssize_t type_show(struct kobject *kobj, | ||||
| 			 struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	ssize_t ret = 0; | ||||
| 
 | ||||
| 	raw_spin_lock_irq(&desc->lock); | ||||
| 	ret = sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); | ||||
| 	raw_spin_unlock_irq(&desc->lock); | ||||
| 
 | ||||
| 	return ret; | ||||
| 	guard(raw_spinlock_irq)(&desc->lock); | ||||
| 	return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); | ||||
| 
 | ||||
| } | ||||
| IRQ_ATTR_RO(type); | ||||
| 
 | ||||
| static ssize_t wakeup_show(struct kobject *kobj, | ||||
| 			   struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	ssize_t ret = 0; | ||||
| 
 | ||||
| 	raw_spin_lock_irq(&desc->lock); | ||||
| 	ret = sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); | ||||
| 	raw_spin_unlock_irq(&desc->lock); | ||||
| 
 | ||||
| 	return ret; | ||||
| 
 | ||||
| 	guard(raw_spinlock_irq)(&desc->lock); | ||||
| 	return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); | ||||
| } | ||||
| IRQ_ATTR_RO(wakeup); | ||||
| 
 | ||||
| static ssize_t name_show(struct kobject *kobj, | ||||
| 			 struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	ssize_t ret = 0; | ||||
| 
 | ||||
| 	raw_spin_lock_irq(&desc->lock); | ||||
| 	guard(raw_spinlock_irq)(&desc->lock); | ||||
| 	if (desc->name) | ||||
| 		ret = sysfs_emit(buf, "%s\n", desc->name); | ||||
| 	raw_spin_unlock_irq(&desc->lock); | ||||
| 
 | ||||
| 	return ret; | ||||
| 		return sysfs_emit(buf, "%s\n", desc->name); | ||||
| 	return 0; | ||||
| } | ||||
| IRQ_ATTR_RO(name); | ||||
| 
 | ||||
| static ssize_t actions_show(struct kobject *kobj, | ||||
| 			    struct kobj_attribute *attr, char *buf) | ||||
| static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); | ||||
| 	struct irqaction *action; | ||||
| 	ssize_t ret = 0; | ||||
| 	char *p = ""; | ||||
| 
 | ||||
| 	raw_spin_lock_irq(&desc->lock); | ||||
| 	for_each_action_of_desc(desc, action) { | ||||
| 		ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name); | ||||
| 		p = ","; | ||||
| 	scoped_guard(raw_spinlock_irq, &desc->lock) { | ||||
| 		for_each_action_of_desc(desc, action) { | ||||
| 			ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name); | ||||
| 			p = ","; | ||||
| 		} | ||||
| 	} | ||||
| 	raw_spin_unlock_irq(&desc->lock); | ||||
| 
 | ||||
| 	if (ret) | ||||
| 		ret += sysfs_emit_at(buf, ret, "\n"); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| IRQ_ATTR_RO(actions); | ||||
|  | @ -414,19 +389,14 @@ static int __init irq_sysfs_init(void) | |||
| 	int irq; | ||||
| 
 | ||||
| 	/* Prevent concurrent irq alloc/free */ | ||||
| 	irq_lock_sparse(); | ||||
| 
 | ||||
| 	guard(mutex)(&sparse_irq_lock); | ||||
| 	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); | ||||
| 	if (!irq_kobj_base) { | ||||
| 		irq_unlock_sparse(); | ||||
| 	if (!irq_kobj_base) | ||||
| 		return -ENOMEM; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Add the already allocated interrupts */ | ||||
| 	for_each_irq_desc(irq, desc) | ||||
| 		irq_sysfs_add(irq, desc); | ||||
| 	irq_unlock_sparse(); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| postcore_initcall(irq_sysfs_init); | ||||
|  | @ -569,12 +539,12 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node, | |||
| 	return -ENOMEM; | ||||
| } | ||||
| 
 | ||||
| static int irq_expand_nr_irqs(unsigned int nr) | ||||
| static bool irq_expand_nr_irqs(unsigned int nr) | ||||
| { | ||||
| 	if (nr > MAX_SPARSE_IRQS) | ||||
| 		return -ENOMEM; | ||||
| 		return false; | ||||
| 	nr_irqs = nr; | ||||
| 	return 0; | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| int __init early_irq_init(void) | ||||
|  | @ -652,11 +622,9 @@ EXPORT_SYMBOL(irq_to_desc); | |||
| static void free_desc(unsigned int irq) | ||||
| { | ||||
| 	struct irq_desc *desc = irq_to_desc(irq); | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	raw_spin_lock_irqsave(&desc->lock, flags); | ||||
| 	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); | ||||
| 	raw_spin_unlock_irqrestore(&desc->lock, flags); | ||||
| 	scoped_guard(raw_spinlock_irqsave, &desc->lock) | ||||
| 		desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); | ||||
| 	delete_irq_desc(irq); | ||||
| } | ||||
| 
 | ||||
|  | @ -675,16 +643,15 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, | |||
| 	return start; | ||||
| } | ||||
| 
 | ||||
| static int irq_expand_nr_irqs(unsigned int nr) | ||||
| static inline bool irq_expand_nr_irqs(unsigned int nr) | ||||
| { | ||||
| 	return -ENOMEM; | ||||
| 	return false; | ||||
| } | ||||
| 
 | ||||
| void irq_mark_irq(unsigned int irq) | ||||
| { | ||||
| 	mutex_lock(&sparse_irq_lock); | ||||
| 	guard(mutex)(&sparse_irq_lock); | ||||
| 	irq_insert_desc(irq, irq_desc + irq); | ||||
| 	mutex_unlock(&sparse_irq_lock); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_GENERIC_IRQ_LEGACY | ||||
|  | @ -823,11 +790,9 @@ void irq_free_descs(unsigned int from, unsigned int cnt) | |||
| 	if (from >= nr_irqs || (from + cnt) > nr_irqs) | ||||
| 		return; | ||||
| 
 | ||||
| 	mutex_lock(&sparse_irq_lock); | ||||
| 	guard(mutex)(&sparse_irq_lock); | ||||
| 	for (i = 0; i < cnt; i++) | ||||
| 		free_desc(from + i); | ||||
| 
 | ||||
| 	mutex_unlock(&sparse_irq_lock); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(irq_free_descs); | ||||
| 
 | ||||
|  | @ -844,11 +809,10 @@ EXPORT_SYMBOL_GPL(irq_free_descs); | |||
|  * | ||||
|  * Returns the first irq number or error code | ||||
|  */ | ||||
| int __ref | ||||
| __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | ||||
| 		  struct module *owner, const struct irq_affinity_desc *affinity) | ||||
| int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | ||||
| 			    struct module *owner, const struct irq_affinity_desc *affinity) | ||||
| { | ||||
| 	int start, ret; | ||||
| 	int start; | ||||
| 
 | ||||
| 	if (!cnt) | ||||
| 		return -EINVAL; | ||||
|  | @ -866,22 +830,17 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | |||
| 		from = arch_dynirq_lower_bound(from); | ||||
| 	} | ||||
| 
 | ||||
| 	mutex_lock(&sparse_irq_lock); | ||||
| 	guard(mutex)(&sparse_irq_lock); | ||||
| 
 | ||||
| 	start = irq_find_free_area(from, cnt); | ||||
| 	ret = -EEXIST; | ||||
| 	if (irq >=0 && start != irq) | ||||
| 		goto unlock; | ||||
| 		return -EEXIST; | ||||
| 
 | ||||
| 	if (start + cnt > nr_irqs) { | ||||
| 		ret = irq_expand_nr_irqs(start + cnt); | ||||
| 		if (ret) | ||||
| 			goto unlock; | ||||
| 		if (!irq_expand_nr_irqs(start + cnt)) | ||||
| 			return -ENOMEM; | ||||
| 	} | ||||
| 	ret = alloc_descs(start, cnt, node, affinity, owner); | ||||
| unlock: | ||||
| 	mutex_unlock(&sparse_irq_lock); | ||||
| 	return ret; | ||||
| 	return alloc_descs(start, cnt, node, affinity, owner); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(__irq_alloc_descs); | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Thomas Gleixner
						Thomas Gleixner