Since commit 4b3ef9daa4 ("mm/swap: split swap cache into 64MB
trunks"), the address_space associated with a swap device is freed on
swapoff.  So page_mapping() users which may touch that address_space
need some mechanism to prevent it from being freed while they are
accessing it.
The dcache flushing functions (flush_dcache_page(), etc.) in
architecture-specific code may access the address_space of the swap
device for anonymous pages in the swap cache via page_mapping().  But
in some cases there is no mechanism to prevent the swap device from
being swapped off concurrently, for example:
  CPU1					CPU2
  __get_user_pages()			swapoff()
    flush_dcache_page()
      mapping = page_mapping()
        ...				  exit_swap_address_space()
        ...				    kvfree(spaces)
        mapping_mapped(mapping)
The address space may be accessed after being freed.
But per cachetlb.txt and Russell King, flush_dcache_page() only cares
about file cache pages; for anonymous pages, flush_anon_page() should
be used.  The implementation of flush_dcache_page() in every
architecture follows this: each checks whether page_mapping() is NULL
and whether mapping_mapped() returns true to decide whether to flush
the dcache immediately, and uses the interval tree (mapping->i_mmap)
to find all user space mappings.  Neither mapping_mapped() nor
mapping->i_mmap is used for anonymous pages in the swap cache at all.
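The shared pattern looks roughly like this (a simplified sketch, not
any single architecture's exact code):

  void flush_dcache_page(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);

  	/* File cache page with no user mappings: defer the flush. */
  	if (mapping && !mapping_mapped(mapping)) {
  		clear_bit(PG_dcache_clean, &page->flags);
  		return;
  	}

  	/*
  	 * Flush now; on aliasing caches, user aliases are found by
  	 * walking the mapping->i_mmap interval tree.
  	 */
  	__flush_dcache_page(mapping, page);
  }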
So, to fix the race between swapoff and dcache flushing,
page_mapping_file() is added to return the address_space for file
cache pages and NULL otherwise.  All page_mapping() calls in the
dcache flushing functions are replaced with page_mapping_file().
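A minimal sketch of the helper, following the description above (pages
in the swap cache are identified with PageSwapCache()):

  /*
   * For file cache pages, return the address_space, otherwise return
   * NULL; never hand out the swap device's address_space, which may
   * already have been freed by swapoff.
   */
  struct address_space *page_mapping_file(struct page *page)
  {
  	if (unlikely(PageSwapCache(page)))
  		return NULL;
  	return page_mapping(page);
  }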
[akpm@linux-foundation.org: simplify page_mapping_file(), per Mike]
Link: http://lkml.kernel.org/r/20180305083634.15174-1-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Chen Liqin <liqin.linux@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Zankel <chris@zankel.net>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

arch/arm/mm/copypage-xscale.c (135 lines, 3.6 KiB, C)

/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

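/* Serialise use of the single COPYPAGE_MINICACHE kernel mapping. */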
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

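	/*
	 * Lazy write-back: flush the source page's dcache lines only if
	 * PG_dcache_clean was not already set.  page_mapping_file() is
	 * used instead of page_mapping(), so a swap cache page yields
	 * NULL and the swap device's address_space is never touched,
	 * avoiding the swapoff race described in the commit message.
	 */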
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};