mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 02:30:34 +02:00
	ARM: 6379/1: Assume new page cache pages have dirty D-cache
There are places in Linux where writes to newly allocated page cache pages
happen without a subsequent call to flush_dcache_page() (several PIO drivers,
including the USB HCD). This patch changes the meaning of PG_arch_1 to be
PG_dcache_clean and always flushes the D-cache for a newly mapped page in
update_mmu_cache(). The patch also sets the PG_arch_1 bit in the DMA cache
maintenance function to avoid additional cache flushing in update_mmu_cache().

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:

parent 0fc73099dd
commit c01778001a

8 changed files with 17 additions and 10 deletions
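For context only, and not part of the commit: a minimal sketch of the failure
mode the commit message above describes, assuming a hypothetical PIO-style
driver. pio_fill_page() and its parameters are invented for illustration;
only kmap_atomic()/kunmap_atomic() (in their 2.6.36-era two-argument form, as
used in the hunks below) and flush_dcache_page() are real kernel interfaces.

#include <linux/highmem.h>	/* kmap_atomic()/kunmap_atomic() */
#include <linux/string.h>	/* memcpy() */

/*
 * Illustration only -- not from the patch.  A PIO-style driver fills a
 * freshly allocated page cache page through the kernel mapping and never
 * calls flush_dcache_page().  Before this patch such a page never had
 * PG_dcache_dirty set, so update_mmu_cache() skipped the D-cache flush and
 * userspace could see stale data on aliasing caches.  After this patch
 * PG_dcache_clean starts out clear for a new page, so the first
 * update_mmu_cache() for it performs the flush.
 */
static void pio_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page, KM_USER0);	/* 2.6.36-era API */

	memcpy(dst, src, len);		/* data written via the kernel alias */
	kunmap_atomic(dst, KM_USER0);
	/* no flush_dcache_page(page) here -- the case this patch addresses */
}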
				
			
@@ -137,10 +137,10 @@
 #endif
 
 /*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
  */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
  *	MM Cache Management
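The rename above inverts the flag's polarity, and the code hunks that follow
all apply the same one-line transformation. A side-by-side sketch of the two
idioms (illustration only, distilled from the hunks in this commit):

/* Old scheme: PG_dcache_dirty set means the D-cache may be stale;
 * flush it and clear the bit. */
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
	__flush_dcache_page(page_mapping(page), page);

/* New scheme: PG_dcache_clean set means the D-cache is known clean.
 * A page with the bit clear -- including a freshly allocated page cache
 * page, whose arch flag starts out clear -- is treated as potentially
 * dirty, so it is flushed on first use, and the same atomic operation
 * marks it clean afterwards. */
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
	__flush_dcache_page(page_mapping(page), page);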
@@ -560,7 +560,7 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #endif
 
 /*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	/* FIXME: not highmem safe */
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
@@ -523,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
@@ -141,7 +141,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE.  Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -169,7 +169,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 #endif
 	if (mapping) {
@@ -248,7 +248,7 @@ void flush_dcache_page(struct page *page)
 
 #ifndef CONFIG_SMP
 	if (mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
+		clear_bit(PG_dcache_clean, &page->flags);
 	else
 #endif
 	{
@@ -257,6 +257,7 @@ void flush_dcache_page(struct page *page)
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);