make vfree() safe to call from interrupt contexts

A bunch of RCU callbacks want to be able to do vfree() and end up with
rather kludgy schemes.  Just let vfree() do the right thing - put the
victim on llist and schedule actual __vunmap() via schedule_work(), so
that it runs from non-interrupt context.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 32fcfd4071
parent 6dbe51c251

1 changed file with 40 additions and 5 deletions
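For context, here is the caller-side pattern this enables - a sketch, not part
of the commit. One of the "kludgy schemes" the message alludes to was an RCU
callback bouncing through a private work item just to reach process context,
because vfree() used to hit BUG_ON(in_interrupt()) when called from softirq.
With this patch the callback can free directly; struct foo, foo_free_rcu() and
foo_release() below are hypothetical names.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>

/* Hypothetical vmalloc'ed, RCU-protected object - illustrative only. */
struct foo {
	struct rcu_head rcu;
	char payload[];
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	/*
	 * Runs from softirq context.  vfree() now detects in_interrupt()
	 * and defers the real __vunmap() to a workqueue instead of
	 * triggering the old BUG_ON(in_interrupt()).
	 */
	vfree(f);
}

static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_rcu);	/* free after the grace period */
}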
				
			
		
							
								
								
									
mm/vmalloc.c | 39

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,10 +27,30 @@
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
 #include <linux/atomic.h>
+#include <linux/llist.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
+struct vfree_deferred {
+	struct llist_head list;
+	struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *llnode = llist_del_all(&p->list);
+	while (llnode) {
+		void *p = llnode;
+		llnode = llist_next(llnode);
+		__vunmap(p, 1);
+	}
+}
+
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -1184,10 +1204,14 @@ void __init vmalloc_init(void)
 
 	for_each_possible_cpu(i) {
 		struct vmap_block_queue *vbq;
+		struct vfree_deferred *p;
 
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
+		p = &per_cpu(vfree_deferred, i);
+		init_llist_head(&p->list);
+		INIT_WORK(&p->wq, free_work);
 	}
 
 	/* Import existing vmlist entries. */
@@ -1520,14 +1544,24 @@ static void __vunmap(const void *addr, int deallocate_pages)
  *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  *	NULL, no operation is performed.
  *
- *	Must not be called in interrupt context.
+ *	Must not be called in NMI context (strictly speaking, only if we don't
+ *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ *	conventions for vfree() arch-dependent would be a really bad idea)
+ *
  */
 void vfree(const void *addr)
 {
-	BUG_ON(in_interrupt());
+	BUG_ON(in_nmi());
 
 	kmemleak_free(addr);
 
-	__vunmap(addr, 1);
+	if (!addr)
+		return;
+	if (unlikely(in_interrupt())) {
+		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		llist_add((struct llist_node *)addr, &p->list);
+		schedule_work(&p->wq);
+	} else
+		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
@@ -1545,6 +1579,7 @@ void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	might_sleep();
-	__vunmap(addr, 0);
+	if (addr)
+		__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
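Two details of the mechanism are worth noting (editorial aside, not from the
commit message). First, the deferred path allocates nothing: the dead block
itself becomes the list node, since llist_add((struct llist_node *)addr,
&p->list) reuses the first word of the vmalloc'ed area, which is page-aligned
and at least a page long. Second, schedule_work() on an already-queued work
item is a no-op, so repeated interrupt-context frees simply pile onto the
per-CPU list until free_work() drains it. A minimal sketch of the same
node-in-the-dead-object trick, with hypothetical names (pending_free,
defer_free, drain_free):

#include <linux/llist.h>
#include <linux/vmalloc.h>

/* Hypothetical sketch - not from the commit. */
static LLIST_HEAD(pending_free);

/* Callable from any context: links the dead block into a lock-free list. */
static void defer_free(void *dead_block)
{
	/*
	 * The block is unused from here on, so its first word is free
	 * to serve as the llist linkage.
	 */
	llist_add((struct llist_node *)dead_block, &pending_free);
}

/* Must run in process context, e.g. from a work item. */
static void drain_free(void)
{
	struct llist_node *node = llist_del_all(&pending_free);

	while (node) {
		void *block = node;

		node = llist_next(node);	/* read the link before freeing */
		vfree(block);
	}
}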