	android: binder: drop lru lock in isolate callback
Drop the global lru lock in the isolate callback before calling
zap_page_range(), which calls cond_resched(), and re-acquire the global
lru lock before returning. Also change the return code to
LRU_REMOVED_RETRY. Use mmput_async() when we fail to acquire mmap_sem
in an atomic context.

This fixes "BUG: sleeping function called from invalid context" errors
when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.

Also restore mmput_async(), which was initially introduced in commit
ec8d7c14ea ("mm, oom_reaper: do not mmput synchronously from the oom
reaper context") and was removed in commit 2129258024 ("mm: oom: let
oom_reap_task and exit_mmap run concurrently").

Link: http://lkml.kernel.org/r/20170914182231.90908-1-sherryy@android.com
Fixes: f2517eb76f ("android: binder: Add global lru shrinker to binder")
Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reported-by: Kyle Yan <kyan@codeaurora.org>
Acked-by: Arve Hjønnevåg <arve@android.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Martijn Coenen <maco@google.com>
Cc: Todd Kjos <tkjos@google.com>
Cc: Riley Andrews <riandrews@android.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Hoeun Ryu <hoeun.ryu@gmail.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
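The pattern the patch relies on is sketched below as a minimal, hypothetical list_lru isolate callback. Only list_lru_isolate(), the unlock/lock of the lru spinlock passed to the callback, and the LRU_REMOVED_RETRY return value come from the change itself; the rest is invented for illustration. The idea: isolate the item while the lru spinlock is still held, drop the lock before doing anything that can sleep, re-take it before returning, and report LRU_REMOVED_RETRY so the walker knows the lock was dropped and restarts its traversal.

#include <linux/list_lru.h>
#include <linux/spinlock.h>

static enum lru_status example_isolate(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock, void *cb_arg)
{
	/* Take the item off the lru while the lru spinlock is still held. */
	list_lru_isolate(lru, item);

	/* Drop the lru lock before doing anything that may sleep ... */
	spin_unlock(lock);

	/* ... do the work that may sleep (e.g. zap_page_range()) ... */

	/* ... and re-acquire it before returning to the walker. */
	spin_lock(lock);

	/*
	 * LRU_REMOVED_RETRY: the item was removed, but the lock was
	 * dropped, so the walker must restart its traversal.
	 */
	return LRU_REMOVED_RETRY;
}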
parent 3f2eb0287e
commit a1b2289cef

3 changed files with 36 additions and 6 deletions
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	struct binder_alloc *alloc;
 	uintptr_t page_addr;
 	size_t index;
+	struct vm_area_struct *vma;
 
 	alloc = page->alloc;
 	if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	if (alloc->vma) {
+	vma = alloc->vma;
+	if (vma) {
 		mm = get_task_mm(alloc->tsk);
 		if (!mm)
 			goto err_get_task_mm_failed;
 		if (!down_write_trylock(&mm->mmap_sem))
 			goto err_down_write_mmap_sem_failed;
+	}
+
+	list_lru_isolate(lru, item);
+	spin_unlock(lock);
 
+	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range(alloc->vma,
+		zap_page_range(vma,
 			       page_addr + alloc->user_buffer_offset,
 			       PAGE_SIZE);
 
@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	trace_binder_unmap_kernel_end(alloc, index);
 
-	list_lru_isolate(lru, item);
-
+	spin_lock(lock);
 	mutex_unlock(&alloc->mutex);
-	return LRU_REMOVED;
+	return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-	mmput(mm);
+	mmput_async(mm);
 err_get_task_mm_failed:
 err_page_already_freed:
 	mutex_unlock(&alloc->mutex);
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well
+ */
+void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -946,6 +946,24 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
+static void mmput_async_fn(struct work_struct *work)
+{
+	struct mm_struct *mm = container_of(work, struct mm_struct,
+					    async_put_work);
+
+	__mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		INIT_WORK(&mm->async_put_work, mmput_async_fn);
+		schedule_work(&mm->async_put_work);
+	}
+}
+#endif
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
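For completeness, a minimal hypothetical caller showing why the restored helper matters: unlike mmput(), which may run the sleeping __mmput() slow path inline, mmput_async() defers that slow path to a workqueue and can therefore be called while atomic, for example under a spinlock. Everything here except the mmput_async() call itself is invented for illustration.

#include <linux/sched/mm.h>
#include <linux/spinlock.h>

static void example_drop_mm_atomic(struct mm_struct *mm, spinlock_t *lock)
{
	spin_lock(lock);
	/* Cannot sleep here, so the final __mmput() is punted to a workqueue. */
	mmput_async(mm);
	spin_unlock(lock);
}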