mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 10:40:15 +02:00

MIPS: Loongson: Get rid of Loongson 2 #ifdefery all over arch/mips.

It was ugly.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent 7b784c634b
commit 14bd8c0820

5 changed files with 170 additions and 130 deletions
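
The change follows one pattern throughout: behaviour that used to be fixed at build time with #ifdef CONFIG_CPU_LOONGSON2 is now selected at run time by switching on the CPU type, so a single kernel image handles Loongson 2 and other r4k-class CPUs. A minimal standalone sketch of that pattern (illustrative only; CONFIG_CPU_LOONGSON2, boot_cpu_type(), CPU_LOONGSON2 and the do_*() helpers below are stand-ins, not the kernel's definitions):

/* Standalone mock of the refactoring pattern, not kernel code. */
#include <stdio.h>

enum cpu_type { CPU_GENERIC, CPU_LOONGSON2 };

static enum cpu_type boot_cpu_type(void) { return CPU_LOONGSON2; }
static void do_loongson2_variant(void) { puts("loongson2 path"); }
static void do_generic_variant(void)   { puts("generic path"); }

int main(void)
{
	/* Old style: decided when the kernel is configured, one behaviour per build. */
#ifdef CONFIG_CPU_LOONGSON2
	do_loongson2_variant();
#else
	do_generic_variant();
#endif

	/* New style: decided at run time, the same binary serves both CPUs. */
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		do_loongson2_variant();
		break;
	default:
		do_generic_variant();
		break;
	}
	return 0;
}

In the hunks below the same idea appears as switch (boot_cpu_type()) in the header inlines and switch (current_cpu_type()) in the C files.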
				
			
@@ -20,11 +20,7 @@
#define Index_Load_Tag_D		0x05
#define Index_Store_Tag_I		0x08
#define Index_Store_Tag_D		0x09
#if defined(CONFIG_CPU_LOONGSON2)
#define Hit_Invalidate_I		0x00
#else
#define Hit_Invalidate_I		0x10
#endif
#define Hit_Invalidate_D		0x11
#define Hit_Writeback_Inv_D		0x15

@@ -84,4 +80,9 @@
#define Index_Store_Data_D		0x1d
#define Index_Store_Data_S		0x1f

/*
 * Loongson2-specific cacheops
 */
#define Hit_Invalidate_I_Loongson23	0x00

#endif	/* __ASM_CACHEOPS_H */
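
A note on reading these constants: in the architectural MIPS cacheop encoding, bits [1:0] of the 5-bit op select the cache (0 = I, 1 = D, 3 = S) and bits [4:2] select the operation, which is why Hit_Invalidate_I is 0x10 and Hit_Invalidate_D is 0x11. The quirk this header used to hide behind #ifdef is that Loongson 2 performs its I-cache hit-invalidate with encoding 0x00, hence the new always-present Hit_Invalidate_I_Loongson23 definition. A small standalone sketch of the decomposition (illustrative only, not kernel code):

#include <stdio.h>

/* Compose a MIPS cacheop: 3-bit operation in bits [4:2], cache selector in bits [1:0]. */
#define CACHEOP(op, cache)	(((op) << 2) | (cache))

int main(void)
{
	printf("Index_Store_Tag_I   = 0x%02x\n", CACHEOP(2, 0));	/* 0x08 */
	printf("Hit_Invalidate_I    = 0x%02x\n", CACHEOP(4, 0));	/* 0x10 */
	printf("Hit_Invalidate_D    = 0x%02x\n", CACHEOP(4, 1));	/* 0x11 */
	printf("Hit_Writeback_Inv_D = 0x%02x\n", CACHEOP(5, 1));	/* 0x15 */
	return 0;
}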

@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>

/*

@@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)
static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson23, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

@@ -208,7 +217,15 @@ static inline void flush_scache_line(unsigned long addr)
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
		break;

	default:
		protected_cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

/*

@@ -412,8 +429,8 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\

@@ -432,13 +449,15 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
	protected_, loongson23_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */
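
The extra name fragment added to __BUILD_BLAST_CACHE_RANGE() is what lets the cache code call a Loongson-specific range flush without any #ifdef: the last argument is empty for the pre-existing helpers, so their names are unchanged, and only the new variant grows the loongson23_ infix. Hand-expanding the invocation __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, protected_, loongson23_) against the template above gives roughly the following; this is a reading aid, not a compilable excerpt, and the prologue/epilogue plus the parts of the body not shown in the hunk are assumed to follow the existing helpers:

/* Simplified hand-expansion of the macro above for the Loongson 2 case. */
static inline void protected_loongson23_blast_icache_range(unsigned long start,
						    unsigned long end)
{
	unsigned long lsize = cpu_icache_line_size();
	unsigned long addr = start & ~(lsize - 1);
	unsigned long aend = (end - 1) & ~(lsize - 1);

	while (1) {
		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
		if (addr == aend)
			break;
		addr += lsize;
	}
}

This is the helper that local_r4k_flush_icache_range() in the next file dispatches to at run time.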

@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:

@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;

	default:
		r4k_blast_dcache();
		r4k_blast_icache();
		break;
	}
}

@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_blast_icache_range(start, end);
			break;

		default:
			protected_loongson23_blast_icache_range(start, end);
			break;
		}
	}
}

static inline void local_r4k_flush_icache_range_ipi(void *args)

@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef	CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif
	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
		 * one op will act on all 4 ways
		 */
		c->icache.ways = 1;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,

@@ -1193,7 +1206,6 @@ static int probe_scache(void)
	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

@@ -1259,11 +1270,10 @@ static void setup_scache(void)
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);

#endif /* CONFIG_MIPS_MT_SMTC */

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
 * unfortrunately, itlb is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

void local_flush_tlb_all(void)
{

@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	flush_itlb();
	EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		flush_itlb();
		EXIT_CRITICAL(flags);
	}
}

@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	flush_itlb();
	EXIT_CRITICAL(flags);
}

@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		flush_itlb_vm(vma);
		EXIT_CRITICAL(flags);
	}
}

@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	flush_itlb();
	EXIT_CRITICAL(flags);
}

@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	flush_itlb_vm(vma);
	EXIT_CRITICAL(flags);
}
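
One incidental benefit of the TLB-flush conversion above is that the block-style FLUSH_ITLB / FLUSH_ITLB_VM() macros become real inline functions, which are argument-type-checked and behave like ordinary statements. A standalone illustration of the macro pitfall the new helpers cannot hit (not kernel code; flush_hw() stands in for write_c0_diag(4)):

#include <stdio.h>

static void flush_hw(void) { puts("itlb flushed"); }

/* Block-style macro, in the spirit of the old FLUSH_ITLB_VM(). */
#define FLUSH_EXEC(exec) { if (exec) flush_hw(); }

/* Inline-function style, in the spirit of the new flush_itlb_vm(). */
static inline void flush_exec(int exec)
{
	if (exec)
		flush_hw();
}

int main(void)
{
	int exec = 1;

	/*
	 * The macro form cannot be dropped into an if/else chain:
	 *
	 *	if (exec)
	 *		FLUSH_EXEC(exec);
	 *	else
	 *		puts("skipped");
	 *
	 * fails to compile because the ';' after the expanded block
	 * orphans the else.  The function form below has no such trap.
	 */
	if (exec)
		flush_exec(exec);
	else
		puts("skipped");

	return 0;
}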

@@ -1311,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
	switch (boot_cpu_type()) {
	default:
		if (sizeof(long) == 4) {
	case CPU_LOONGSON2:
		/* Loongson2 ebase is different than r4k, we have more space */
			if ((p - tlb_handler) > 64)
				panic("TLB refill handler space exceeded");
			/*
			 * Now fold the handler in the TLB refill handler space.
			 */
			f = final_handler;
			/* Simplest case, just copy the handler. */
			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
			final_len = p - tlb_handler;
			break;
		} else {
			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
				&& uasm_insn_has_bdelay(relocs,
							tlb_handler + MIPS64_REFILL_INSNS - 3)))
				panic("TLB refill handler space exceeded");
			/*
			 * Now fold the handler in the TLB refill handler space.
			 */
			f = final_handler + MIPS64_REFILL_INSNS;
			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
				/* Just copy the handler. */
				uasm_copy_handler(relocs, labels, tlb_handler, p, f);
				final_len = p - tlb_handler;
			} else {
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
		const enum label_id ls = label_tlb_huge_update;
				const enum label_id ls = label_tlb_huge_update;
#else
		const enum label_id ls = label_vmalloc;
				const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;
				u32 *split;
				int ov = 0;
				int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;
				for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
					;
				BUG_ON(i == ARRAY_SIZE(labels));
				split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;
				/*
				 * See if we have overflown one way or the other.
				 */
				if (split > tlb_handler + MIPS64_REFILL_INSNS ||
				    split < p - MIPS64_REFILL_INSNS)
					ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;
				if (ov) {
					/*
					 * Split two instructions before the end.  One
					 * for the branch and one for the instruction
					 * in the delay slot.
					 */
					split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;
					/*
					 * If the branch would fall in a delay slot,
					 * we must back up an additional instruction
					 * so that it is no longer in a delay slot.
					 */
					if (uasm_insn_has_bdelay(relocs, split - 1))
						split--;
				}
				/* Copy first part of the handler. */
				uasm_copy_handler(relocs, labels, tlb_handler, split, f);
				f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
				if (ov) {
					/* Insert branch. */
					uasm_l_split(&l, final_handler);
					uasm_il_b(&f, &r, label_split);
					if (uasm_insn_has_bdelay(relocs, split))
						uasm_i_nop(&f);
					else {
						uasm_copy_handler(relocs, labels,
								  split, split + 1, f);
						uasm_move_labels(labels, f, f + 1, -1);
						f++;
						split++;
					}
				}

				/* Copy the rest of the handler. */
				uasm_copy_handler(relocs, labels, split, p, final_handler);
				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
					    (p - split);
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
		break;
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",