	Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/vxlan.c
	drivers/vhost/net.c
	include/linux/if_vlan.h
	net/core/dev.c

The net/core/dev.c conflict was the overlap of one commit marking an
existing function static whilst another was adding a new function.

In the include/linux/if_vlan.h case, the type used for a local variable
was changed in 'net', whereas the function got rewritten to fix a
stacked vlan bug in 'net-next'.

In drivers/vhost/net.c, Al Viro's iov_iter conversions in 'net-next'
overlapped with an endianness fix for VHOST 1.0 in 'net'.

In drivers/net/vxlan.c, vxlan_find_vni() added a 'flags' parameter in
'net-next' whereas in 'net' there was a bug fix to pass in the correct
network namespace pointer in calls to this function.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6e03f896b5
280 changed files with 2583 additions and 1662 deletions
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
 	compatible	= "st,comms-ssc4-i2c";
 	reg		= <0xfed40000 0x110>;
 	interrupts	=  <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-	clocks		= <&CLK_S_ICN_REG_0>;
+	clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
 	clock-names	= "ssc";
 	clock-frequency = <400000>;
 	pinctrl-names	= "default";
@@ -47,6 +47,7 @@ dallas,ds3232		Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510		CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75		Digital Thermometer and Thermostat
 dlg,da9053		DA9053: flexible system level PMIC with multicore support
+dlg,da9063		DA9063: system PMIC for quad-core application processors
 epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
@@ -199,16 +199,9 @@ frame header.
 TX limitations
 --------------
 
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015 the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
 
 Example
 -------
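The removed paragraphs described a conditional zero-copy TX path; the new text records that the kernel now always copies. A small C model of the old rule the deleted text documented versus the new behaviour (illustrative only, not kernel source; the helper names are hypothetical):

#include <stdbool.h>

/* Hypothetical model of the pre-change rule the deleted text described:
 * zero-copy TX was only allowed when nobody else could modify the frame
 * between validation and processing. */
static bool may_zero_copy(int ring_mappings, bool fd_shared)
{
	return ring_mappings == 1 && !fd_shared;
}

/* After commit 4682a0358639b29cf, the kernel takes the copy path
 * unconditionally, so the answer is always "copy". */
static bool may_zero_copy_now(void)
{
	return false;
}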
							
								
								
									
MAINTAINERS: 11 changed lines
@@ -708,6 +708,16 @@ X:	drivers/iio/*/adjd*
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Arve Hjønnevåg <arve@android.com>
+M:	Riley Andrews <riandrews@android.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+L:	devel@driverdev.osuosl.org
+S:	Supported
+F:	drivers/android/
+F:	drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linuxppc-dev@lists.ozlabs.org
@@ -10181,6 +10191,7 @@ USERSPACE I/O (UIO)
 M:	"Hans J. Koch" <hjk@hansjkoch.de>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/DocBook/uio-howto.tmpl
 F:	drivers/uio/
 F:	include/linux/uio*.h

Makefile: 2 changed lines
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
@@ -156,6 +156,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
@@ -161,6 +161,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
 
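Both hunks above add the same branch: the core mm can now return VM_FAULT_SIGSEGV, and each architecture's fault handler must route it to its bad-area path (SIGSEGV) instead of falling through to SIGBUS. A condensed, self-contained C model of the dispatch (the flag values are illustrative; the kernel defines VM_FAULT_* in <linux/mm.h>):

#include <signal.h>

/* Illustrative values only; not the kernel's definitions. */
#define VM_FAULT_OOM     0x0001u
#define VM_FAULT_SIGBUS  0x0002u
#define VM_FAULT_SIGSEGV 0x0040u

/* Model of the dispatch: OOM first, then the new SIGSEGV case, then SIGBUS.
 * Returns the signal to deliver, or 0 for the out-of-memory path. */
static int fault_signal(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return 0;		/* defer to the OOM killer */
	if (fault & VM_FAULT_SIGSEGV)
		return SIGSEGV;		/* new: report a bad area */
	if (fault & VM_FAULT_SIGBUS)
		return SIGBUS;
	return 0;
}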
@@ -263,16 +263,37 @@ restart:	adr	r0, LC0
 		 * OK... Let's do some funky business here.
 		 * If we do have a DTB appended to zImage, and we do have
 		 * an ATAG list around, we want the later to be translated
-		 * and folded into the former here.  To be on the safe side,
-		 * let's temporarily move  the stack away into the malloc
-		 * area.  No GOT fixup has occurred yet, but none of the
-		 * code we're about to call uses any global variable.
+		 * and folded into the former here. No GOT fixup has occurred
+		 * yet, but none of the code we're about to call uses any
+		 * global variable.
 		*/
-		add	sp, sp, #0x10000
+
+		/* Get the initial DTB size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+		/* 50% DTB growth should be good enough */
+		add	r5, r5, r5, lsr #1
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+		/* clamp to 32KB min and 1MB max */
+		cmp	r5, #(1 << 15)
+		movlo	r5, #(1 << 15)
+		cmp	r5, #(1 << 20)
+		movhi	r5, #(1 << 20)
+		/* temporarily relocate the stack past the DTB work space */
+		add	sp, sp, r5
+
 		stmfd	sp!, {r0-r3, ip, lr}
 		mov	r0, r8
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bl	atags_to_fdt
 
 		/*

@@ -285,11 +306,11 @@ restart:	adr	r0, LC0
 		bic	r0, r0, #1
 		add	r0, r0, #0x100
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
-		sub	sp, sp, #0x10000
+		sub	sp, sp, r5
 #endif
 
 		mov	r8, r6			@ use the appended device tree

@@ -306,7 +327,7 @@ restart:	adr	r0, LC0
 		subs	r1, r5, r1
 		addhi	r9, r9, r1
 
-		/* Get the dtb's size */
+		/* Get the current DTB size */
 		ldr	r5, [r6, #4]
 #ifndef __ARMEB__
 		/* convert r5 (dtb size) to little endian */
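The sizing logic added in the first head.S hunk is easier to read in C. A minimal model of the workspace computation (a sketch for illustration; the real code runs in assembly before the GOT is fixed up, which is why it avoids global variables):

#include <stdint.h>

/* Model of the DTB workspace sizing: allow 50% growth for the
 * ATAG-to-FDT translation, keep 64-bit alignment, and clamp the
 * result to [32KB, 1MB]. */
static uint32_t dtb_workspace(uint32_t dtb_size)
{
	uint32_t s = dtb_size + (dtb_size >> 1);	/* add r5, r5, r5, lsr #1 */
	s = (s + 7) & ~UINT32_C(7);			/* add #7; bic #7 */
	if (s < (UINT32_C(1) << 15))			/* clamp to 32KB min */
		s = UINT32_C(1) << 15;
	if (s > (UINT32_C(1) << 20))			/* and 1MB max */
		s = UINT32_C(1) << 20;
	return s;	/* the stack is temporarily moved up by this amount */
}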
@@ -17,14 +17,6 @@ / {
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {

@@ -39,6 +31,14 @@ framebuffer@0 {
 				 <&ahb_gates 44>;
 			status = "disabled";
 		};
+
+		framebuffer@1 {
+			compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+			clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+				 <&ahb_gates 44>, <&ahb_gates 46>;
+			status = "disabled";
+		};
 	};
 
 	cpus {

@@ -438,8 +438,8 @@ usbphy: phy@01c13400 {
 			reg-names = "phy_ctrl", "pmu1", "pmu2";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>, <&usb_clk 2>;
-			reset-names = "usb1_reset", "usb2_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+			reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
 			status = "disabled";
 		};
 
@@ -55,6 +55,12 @@ / {
 	model = "Olimex A10s-Olinuxino Micro";
 	compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";
@@ -18,10 +18,6 @@ / {
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
 	};
 
 	chosen {

@@ -390,8 +386,8 @@ usbphy: phy@01c13400 {
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
@@ -53,6 +53,10 @@ / {
 	model = "HSG H702";
 	compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
@@ -54,6 +54,10 @@ / {
 	model = "Olimex A13-Olinuxino Micro";
 	compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
@@ -55,6 +55,10 @@ / {
 	model = "Olimex A13-Olinuxino";
 	compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
@@ -16,11 +16,6 @@
 / {
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uart1;
-		serial1 = &uart3;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;

@@ -349,8 +344,8 @@ usbphy: phy@01c13400 {
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
@@ -53,12 +53,6 @@ / {
 	interrupt-parent = <&gic>;
 
 	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
 		ethernet0 = &gmac;
 	};
 
@@ -55,6 +55,12 @@ / {
 	model = "LeMaker Banana Pi";
 	compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart3;
+		serial2 = &uart7;
+	};
+
 	soc@01c00000 {
 		spi0: spi@01c05000 {
 			pinctrl-names = "default";
@@ -19,6 +19,14 @@ / {
 	model = "Merrii A20 Hummingbird";
 	compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		serial3 = &uart4;
+		serial4 = &uart5;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
@@ -20,6 +20,9 @@ / {
 	compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
 	aliases {
+		serial0 = &uart0;
+		serial1 = &uart6;
+		serial2 = &uart7;
 		spi0 = &spi1;
 		spi1 = &spi2;
 	};
@@ -54,14 +54,6 @@ / {
 
 	aliases {
 		ethernet0 = &gmac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
@@ -55,6 +55,10 @@ / {
 	model = "Ippo Q8H Dual Core Tablet (v5)";
 	compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+	aliases {
+		serial0 = &r_uart;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
@@ -52,15 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -54,6 +54,11 @@ / {
 	model = "Merrii A80 Optimus Board";
 	compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart4;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
@@ -52,16 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
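The two accessors added above let common code manipulate HCR trap bits without touching the struct layout directly. An illustrative caller (hypothetical helper; HCR_TVM comes from <asm/kvm_arm.h>):

/* Hypothetical example: start trapping guest VM-register writes. */
static inline void vcpu_trap_vm_regs(struct kvm_vcpu *vcpu)
{
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_TVM);
}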
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest on this vcpu */
 	bool pause;
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 

@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size,
-					     bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+					       unsigned long size,
+					       bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
-
 	/*
 	 * If we are going to insert an instruction page and the icache is
 	 * either VIPT or PIPT, there is a potential problem where the host

@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		__cpuc_coherent_user_range(hva, hva + size);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+	VM_BUG_ON(size & ~PAGE_MASK);
+
+	if (!need_flush && !icache_is_pipt())
+		goto vipt_cache;
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		if (need_flush)
+			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+vipt_cache:
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif	/* !__ASSEMBLY__ */
@@ -22,10 +22,12 @@
 
 __invalid_entry:
 	v7m_exception_entry
+#ifdef CONFIG_PRINTK
 	adr	r0, strerr
 	mrs	r1, ipsr
 	mov	r2, lr
 	bl	printk
+#endif
 	mov	r0, sp
 	bl	show_regs
 1:	b	1b
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-	/*
-	 * Check whether this vcpu requires the cache to be flushed on
-	 * this physical CPU. This is a consequence of doing dcache
-	 * operations by set/way on this vcpu. We do it here to be in
-	 * a non-preemptible section.
-	 */
-	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
 	kvm_arm_set_running_vcpu(vcpu);
 }
 

@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct coproc_params *p,
-			  const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
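The set/way handler now defers to kvm_set_way_flush(), and the VM-register accessor ends with kvm_toggle_cache(); their bodies live in arch/arm/kvm/mmu.c and are not fully shown in this excerpt. A minimal sketch consistent with the declarations and call sites above, assuming the HCR_TVM trap bit and the accessors added in kvm_emulate.h; treat it as an outline, not the kernel's verbatim code:

/* Sketch: on the first set/way op, flush everything once and start
 * trapping VM-register writes so cache enable/disable is visible. */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = vcpu_get_hcr(vcpu);

	if (!(hcr & HCR_TVM)) {
		stage2_flush_vm(vcpu->kvm);
		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
	}
}

/* Sketch: when the MMU/cache enable state flips, clean+invalidate the
 * whole VM; once the caches are back on, stop trapping VM registers. */
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	if (now_enabled)
		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}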
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64		.is_64 = true
 #define is32		.is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r);
 
 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+	__kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	__kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+	__kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {

@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {

@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 	start_pte = pte = pte_offset_kernel(pmd, addr);
 	do {
 		if (!pte_none(*pte)) {
+			pte_t old_pte = *pte;
+
 			kvm_set_pte(pte, __pte(0));
-			put_page(virt_to_page(pte));
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+			/* No need to invalidate the cache for device mappings */
+			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+				kvm_flush_dcache_pte(old_pte);
+
+			put_page(virt_to_page(pte));
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 

@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (kvm_pmd_huge(*pmd)) {
+				pmd_t old_pmd = *pmd;
+
 				pmd_clear(pmd);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pmd(old_pmd);
+
 				put_page(virt_to_page(pmd));
 			} else {
 				unmap_ptes(kvm, pmd, addr, next);

@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
 			if (pud_huge(*pud)) {
+				pud_t old_pud = *pud;
+
 				pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pud(old_pud);
+
 				put_page(virt_to_page(pud));
 			} else {
 				unmap_pmds(kvm, pud, addr, next);

@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-		}
+		if (!pte_none(*pte) &&
+		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 

@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-			} else {
+			if (kvm_pmd_huge(*pmd))
+				kvm_flush_dcache_pmd(*pmd);
+			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
-			}
 		}
 	} while (pmd++, addr = next, addr != end);
 }

@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-			} else {
+			if (pud_huge(*pud))
+				kvm_flush_dcache_pud(*pud);
+			else
 				stage2_flush_pmds(kvm, pud, addr, next);
-			}
 		}
 	} while (pud++, addr = next, addr != end);
 }

@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;

@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)

@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);

@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}

@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	unmap_stage2_range(kvm, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
 | 
				
			||||||
 | 
					 * We use the following policy:
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * - If we trap a S/W operation, we enable VM trapping to detect
 | 
				
			||||||
 | 
					 *   caches being turned on/off, and do a full clean.
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * - We flush the caches on both caches being turned on and off.
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * - Once the caches are enabled, we stop trapping VM ops.
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					void kvm_set_way_flush(struct kvm_vcpu *vcpu)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						unsigned long hcr = vcpu_get_hcr(vcpu);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/*
 | 
				
			||||||
 | 
						 * If this is the first time we do a S/W operation
 | 
				
			||||||
 | 
						 * (i.e. HCR_TVM not set) flush the whole memory, and set the
 | 
				
			||||||
 | 
						 * VM trapping.
 | 
				
			||||||
 | 
						 *
 | 
				
			||||||
 | 
						 * Otherwise, rely on the VM trapping to wait for the MMU +
 | 
				
			||||||
 | 
						 * Caches to be turned off. At that point, we'll be able to
 | 
				
			||||||
 | 
						 * clean the caches again.
 | 
				
			||||||
 | 
						 */
 | 
				
			||||||
 | 
						if (!(hcr & HCR_TVM)) {
 | 
				
			||||||
 | 
							trace_kvm_set_way_flush(*vcpu_pc(vcpu),
 | 
				
			||||||
 | 
										vcpu_has_cache_enabled(vcpu));
 | 
				
			||||||
 | 
							stage2_flush_vm(vcpu->kvm);
 | 
				
			||||||
 | 
							vcpu_set_hcr(vcpu, hcr | HCR_TVM);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						bool now_enabled = vcpu_has_cache_enabled(vcpu);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/*
 | 
				
			||||||
 | 
						 * If switching the MMU+caches on, need to invalidate the caches.
 | 
				
			||||||
 | 
						 * If switching it off, need to clean the caches.
 | 
				
			||||||
 | 
						 * Clean + invalidate does the trick always.
 | 
				
			||||||
 | 
						 */
 | 
				
			||||||
 | 
						if (now_enabled != was_enabled)
 | 
				
			||||||
 | 
							stage2_flush_vm(vcpu->kvm);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Caches are now on, stop trapping VM ops (until a S/W op) */
 | 
				
			||||||
 | 
						if (now_enabled)
 | 
				
			||||||
 | 
							vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
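Taken together, the two functions added above implement the policy from the comment block: the first trapped set/way op does one full stage-2 flush and arms HCR_TVM; after that, every cache on/off transition flushes again, and trapping is dropped once the caches come back on. A minimal sketch of that state machine follows, with invented names standing in for the kernel's pieces (flush_whole_vm() for stage2_flush_vm(), the two fields for HCR_TVM and SCTLR.M/C):

struct vcpu_model {
	int trap_vm_regs;	/* stands in for HCR_TVM */
	int caches_on;		/* stands in for SCTLR.M + SCTLR.C */
};

static void flush_whole_vm(void)
{
	/* clean + invalidate every line backing guest memory */
}

/* Guest executed a trapped set/way op. */
static void on_set_way_op(struct vcpu_model *v)
{
	if (!v->trap_vm_regs) {		/* first S/W op since trapping was dropped */
		flush_whole_vm();
		v->trap_vm_regs = 1;	/* now watch SCTLR writes */
	}
}

/* Guest wrote SCTLR while trapping was armed. */
static void on_sctlr_write(struct vcpu_model *v, int caches_now_on)
{
	if (caches_now_on != v->caches_on)
		flush_whole_vm();	/* clean+invalidate covers both directions */
	v->caches_on = caches_now_on;
	if (caches_now_on)
		v->trap_vm_regs = 0;	/* guest back in control of its caches */
}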
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+	    TP_PROTO(unsigned long vcpu_pc, bool cache),
+	    TP_ARGS(vcpu_pc, cache),
+
+	    TP_STRUCT__entry(
+		    __field(	unsigned long,	vcpu_pc		)
+		    __field(	bool,		cache		)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->vcpu_pc		= vcpu_pc;
+		    __entry->cache		= cache;
+	    ),
+
+	    TP_printk("S/W flush at 0x%016lx (cache %s)",
+		      __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+	    TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+	    TP_ARGS(vcpu_pc, was, now),
+
+	    TP_STRUCT__entry(
+		    __field(	unsigned long,	vcpu_pc		)
+		    __field(	bool,		was		)
+		    __field(	bool,		now		)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->vcpu_pc		= vcpu_pc;
+		    __entry->was		= was;
+		    __entry->now		= now;
+	    ),
+
+	    TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+		      __entry->vcpu_pc, __entry->was ? "on" : "off",
+		      __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
@@ -189,6 +189,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
 	coherency_cpu_base = of_iomap(np, 0);
 	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
 
+	/*
+	 * We should switch the PL310 to I/O coherency mode only if
+	 * I/O coherency is actually enabled.
+	 */
+	if (!coherency_available())
+		return;
+
 	/*
 	 * Add the PL310 property "arm,io-coherent". This makes sure the
 	 * outer sync operation is not used, which allows to
@@ -18,6 +18,8 @@
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>

@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
 				      sizeof(ape6evm_leds_pdata));
 }
 
+static void __init ape6evm_legacy_init_time(void)
+{
+	/* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+	void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+	/* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
+
 static const char *ape6evm_boards_compat_dt[] __initdata = {
 	"renesas,ape6evm",
 	NULL,

@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
 
 DT_MACHINE_START(APE6EVM_DT, "ape6evm")
 	.init_early	= shmobile_init_delay,
+	.init_irq       = ape6evm_legacy_init_irq,
 	.init_machine	= ape6evm_add_standard_devices,
 	.init_late	= shmobile_init_late,
 	.dt_compat	= ape6evm_boards_compat_dt,
+	.init_time	= ape6evm_legacy_init_time,
 MACHINE_END
@@ -21,6 +21,8 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/leds.h>
 #include <linux/mfd/tmio.h>

@@ -811,6 +813,16 @@ static void __init lager_init(void)
 					  lager_ksz8041_fixup);
 }
 
+static void __init lager_legacy_init_irq(void)
+{
+	void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+	/* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
 static const char * const lager_boards_compat_dt[] __initconst = {
 	"renesas,lager",
 	NULL,

@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
 DT_MACHINE_START(LAGER_DT, "lager")
 	.smp		= smp_ops(r8a7790_smp_ops),
 	.init_early	= shmobile_init_delay,
+	.init_irq	= lager_legacy_init_irq,
 	.init_time	= rcar_gen2_timer_init,
 	.init_machine	= lager_init,
 	.init_late	= shmobile_init_late,
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
 #ifdef CONFIG_COMMON_CLK
 	rcar_gen2_clocks_init(mode);
 #endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
 	clocksource_of_init();
+#endif
 }
 
 struct memory_reserve_config {
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
 	if (!max_freq)
 		return;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+	/* Non-multiplatform r8a73a4 SoC cannot use arch timer due
+	 * to GIC being initialized from C and arch timer via DT */
+	if (of_machine_is_compatible("renesas,r8a73a4"))
+		has_arch_timer = false;
+
+	/* Non-multiplatform r8a7790 SoC cannot use arch timer due
+	 * to GIC being initialized from C and arch timer via DT */
+	if (of_machine_is_compatible("renesas,r8a7790"))
+		has_arch_timer = false;
+#endif
+
 	if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
 		if (is_a7_a8_a9)
 			shmobile_setup_delay_hz(max_freq, 1, 3);
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 
 config ARM_KERNMEM_PERMS
 	bool "Restrict kernel memory permissions"
+	depends on MMU
 	help
 	  If this is set, kernel memory other than kernel text (and rodata)
 	  will be made non-executable. The tradeoff is that each region is
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 	for_each_possible_cpu(i) {
-		if (i == cpu) {
-			asid = 0;
-		} else {
-			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			/*
-			 * If this CPU has already been through a
-			 * rollover, but hasn't run another task in
-			 * the meantime, we must preserve its reserved
-			 * ASID, as this is the only trace we have of
-			 * the process it is still running.
-			 */
-			if (asid == 0)
-				asid = per_cpu(reserved_asids, i);
-			__set_bit(asid & ~ASID_MASK, asid_map);
-		}
+		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+		/*
+		 * If this CPU has already been through a
+		 * rollover, but hasn't run another task in
+		 * the meantime, we must preserve its reserved
+		 * ASID, as this is the only trace we have of
+		 * the process it is still running.
+		 */
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, i);
+		__set_bit(asid & ~ASID_MASK, asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -1940,18 +1940,8 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
-/**
- * arm_iommu_attach_device
- * @dev: valid struct device pointer
- * @mapping: io address space mapping structure (returned from
- *	arm_iommu_create_mapping)
- *
- * Attaches specified io address space mapping to the provided device,
- * More than one client might be attached to the same io address space
- * mapping.
- */
-int arm_iommu_attach_device(struct device *dev,
-			    struct dma_iommu_mapping *mapping)
+static int __arm_iommu_attach_device(struct device *dev,
+				     struct dma_iommu_mapping *mapping)
 {
 	int err;
 

@@ -1965,15 +1955,35 @@ int arm_iommu_attach_device(struct device *dev,
 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
 }
-EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
 /**
- * arm_iommu_detach_device
+ * arm_iommu_attach_device
  * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
  *
- * Detaches the provided device from a previously attached map.
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
+ * More than one client might be attached to the same io address space
+ * mapping.
  */
-void arm_iommu_detach_device(struct device *dev)
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+
+	err = __arm_iommu_attach_device(dev, mapping);
+	if (err)
+		return err;
+
+	set_dma_ops(dev, &iommu_ops);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+static void __arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 

@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+	__arm_iommu_detach_device(dev);
+	set_dma_ops(dev, NULL);
+}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)

@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return false;
 	}
 
-	if (arm_iommu_attach_device(dev, mapping)) {
+	if (__arm_iommu_attach_device(dev, mapping)) {
 		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
 				dev_name(dev));
 		arm_iommu_release_mapping(mapping);

@@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
-	arm_iommu_detach_device(dev);
+	if (!mapping)
+		return;
+
+	__arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }
 
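The net effect of the five hunks above: the bare __arm_iommu_attach_device()/__arm_iommu_detach_device() helpers do only the IOMMU work and serve the automatic arch_setup_dma_ops() path, while the exported wrappers additionally install or void the device's dma_map_ops. For a driver that manages its own mapping, the exported pair is used roughly as below (a hedged sketch: the bus, base address, size and error handling are placeholders, not taken from this diff):

	struct dma_iommu_mapping *mapping;
	int err;

	/* One IOVA window for the device; base/size are illustrative. */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Installs the IOMMU-aware dma_map_ops on the device. */
	err = arm_iommu_attach_device(dev, mapping);
	if (err) {
		arm_iommu_release_mapping(mapping);
		return err;
	}

	/* ... dma_map_single()/dma_alloc_coherent() now go via the IOMMU ... */

	arm_iommu_detach_device(dev);		/* also voids the dma_map_ops */
	arm_iommu_release_mapping(mapping);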
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr_el2 = hcr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest */
 	bool pause;
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size,
-					     bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+					       unsigned long size,
+					       bool ipa_uncached)
 {
+	void *va = page_address(pfn_to_page(pfn));
+
 	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
+		kvm_flush_dcache_to_poc(va, size);
 
 	if (!icache_is_aliasing()) {		/* PIPT */
-		flush_icache_range(hva, hva + size);
+		flush_icache_range((unsigned long)va,
+				   (unsigned long)va + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+	struct page *page = pud_page(pud);
+	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
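The recurring idiom in the rewritten helpers above is to flush through the kernel's linear-map alias of the page rather than the userspace address: pfn to struct page to kernel VA, then clean to the point of coherency. Condensed into one hypothetical helper (assuming pfn_valid() lowmem pages, so page_address() yields a usable mapping):

static void flush_guest_page(pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));	/* linear-map alias */

	kvm_flush_dcache_to_poc(va, size);	/* clean+invalidate to PoC */
}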
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
 	return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-	asm volatile("dc cisw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-	asm volatile("dc csw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		do_dc_cisw(val);
-		break;
-
-	case 10:		/* DCCSW */
-		do_dc_csw(val);
-		break;
-	}
-
-done:
-	put_cpu();
+	kvm_set_way_flush(vcpu);
 
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
 	unsigned long val;
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 

@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
 	}
 
-	return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
-			 const struct sys_reg_desc *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr_el2 &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 

@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  NULL, reset_mpidr, MPIDR_EL1 },
 	/* SCTLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 	/* CPACR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
 	  NULL, reset_val, CPACR_EL1, 0 },

@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
@@ -142,6 +142,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
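The same two-line addition recurs for every architecture below: handle_mm_fault() can now return VM_FAULT_SIGSEGV, and each caller must route it to its existing bad-area/SIGSEGV path before the VM_FAULT_SIGBUS test, since an unhandled error bit would fall through to BUG(). The canonical shape of the fix (label names vary per architecture):

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* OOM handling / retry */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS */
		BUG();				/* unknown error bit */
	}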
@@ -176,6 +176,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -172,6 +172,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		 */
 		if (fault & VM_FAULT_OOM) {
 			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			goto bad_area;
 		} else if (fault & VM_FAULT_SIGBUS) {
 			signal = SIGBUS;
 			goto bad_area;

@@ -200,6 +200,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -145,6 +145,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto map_err;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto bus_err;
 		BUG();

@@ -141,6 +141,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -224,6 +224,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -158,6 +158,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -262,6 +262,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -135,6 +135,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -171,6 +171,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -256,6 +256,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 		 */
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto bad_area;
 		BUG();

@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
 			goto out_unlock;
-		} else if (*flt & VM_FAULT_SIGBUS) {
+		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 			ret = -EFAULT;
 			goto out_unlock;
 		}

@@ -437,6 +437,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
 			goto bail;

@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			/* Kernel mode? Handle exceptions or die */
+			if (!user_mode(regs))
+				do_no_context(regs);
+			else
+				do_sigsegv(regs, SEGV_MAPERR);
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
 			if (!user_mode(regs))

@@ -114,6 +114,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
 			do_sigbus(regs, error_code, address);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area(regs, error_code, address);
 		else
 			BUG();
 	}

@@ -249,6 +249,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -446,6 +446,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -442,6 +442,8 @@ static int handle_page_fault(struct pt_regs *regs,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

@@ -80,6 +80,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
+			} else if (fault & VM_FAULT_SIGSEGV) {
+				goto out;
 			} else if (fault & VM_FAULT_SIGBUS) {
 				err = -EACCES;
 				goto out;
| 
						 | 
					@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	case 55: /* 22nm Atom "Silvermont"                */
 | 
						case 55: /* 22nm Atom "Silvermont"                */
 | 
				
			||||||
 | 
						case 76: /* 14nm Atom "Airmont"                   */
 | 
				
			||||||
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 | 
						case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 | 
				
			||||||
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 | 
							memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 | 
				
			||||||
			sizeof(hw_cache_event_ids));
 | 
								sizeof(hw_cache_event_ids));
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
 | 
				
			||||||
	 * or use ldexp(count, -32).
 | 
						 * or use ldexp(count, -32).
 | 
				
			||||||
	 * Watts = Joules/Time delta
 | 
						 * Watts = Joules/Time delta
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
 | 
						return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static u64 rapl_event_update(struct perf_event *event)
 | 
					static u64 rapl_event_update(struct perf_event *event)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	box->phys_id = phys_id;
 	box->pci_dev = pdev;
 	box->pmu = pmu;
-	uncore_box_init(box);
 	pci_set_drvdata(pdev, box);
 
 	raw_spin_lock(&uncore_box_lock);

@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
 			pmu = &type->pmus[j];
 			box = *per_cpu_ptr(pmu->box, cpu);
 			/* called by uncore_cpu_init? */
-			if (box && box->phys_id >= 0) {
-				uncore_box_init(box);
+			if (box && box->phys_id >= 0)
 				continue;
-			}
 
 			for_each_online_cpu(k) {
 				exist = *per_cpu_ptr(pmu->box, k);

@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
 				}
 			}
 
-			if (box) {
+			if (box)
 				box->phys_id = phys_id;
-				uncore_box_init(box);
-			}
 		}
 	}
 	return 0;

@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
 	return box->pmu->type->num_counters;
 }
 
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+		if (box->pmu->type->ops->init_box)
+			box->pmu->type->ops->init_box(box);
+	}
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
 	if (box->pmu->type->ops->disable_box)

@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
 
 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
+	uncore_box_init(box);
+
 	if (box->pmu->type->ops->enable_box)
 		box->pmu->type->ops->enable_box(box);
 }

@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
 	return box->pmu->type->ops->read_counter(box, event);
 }
 
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-		if (box->pmu->type->ops->init_box)
-			box->pmu->type->ops->init_box(box);
-	}
-}
-
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
 	return (box->phys_id < 0);
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
 		u16 cid, lid;
 		u32 ldr, aid;
 
+		if (!kvm_apic_present(vcpu))
+			continue;
+
 		aid = kvm_apic_id(apic);
 		ldr = kvm_apic_get_reg(apic, APIC_LDR);
 		cid = apic_cluster_id(new, ldr);
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 			     VM_FAULT_HWPOISON_LARGE))
 			do_sigbus(regs, error_code, address, fault);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area_nosemaphore(regs, error_code, address);
 		else
 			BUG();
 	}
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
 		},
 	},
+        {
+                .callback = set_scan_all,
+                .ident = "Stratus/NEC ftServer",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
+                },
+        },
+        {
+                .callback = set_scan_all,
+                .ident = "Stratus/NEC ftServer",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
+                },
+        },
 	{}
 };
@@ -117,6 +117,8 @@ void do_page_fault(struct pt_regs *regs)
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
@@ -15,26 +15,6 @@
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
-	struct request_queue *q;
-
-	q = container_of(kobj, struct request_queue, mq_kobj);
-	free_percpu(q->queue_ctx);
-}
-
-static void blk_mq_ctx_release(struct kobject *kobj)
-{
-	struct blk_mq_ctx *ctx;
-
-	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-	kobject_put(&ctx->queue->mq_kobj);
-}
-
-static void blk_mq_hctx_release(struct kobject *kobj)
-{
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
-	kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {

@@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
 	.sysfs_ops	= &blk_mq_sysfs_ops,
 	.default_attrs	= default_ctx_attrs,
-	.release	= blk_mq_ctx_release,
+	.release	= blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
 	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
 	.default_attrs	= default_hw_ctx_attrs,
-	.release	= blk_mq_hctx_release,
+	.release	= blk_mq_sysfs_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)

@@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 		return ret;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		kobject_get(&q->mq_kobj);
 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 		if (ret)
 			break;
@@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	/* hctx kobj stays in hctx */
+	queue_for_each_hw_ctx(q, hctx, i)
+		kfree(hctx);
+
+	kfree(q->queue_hw_ctx);
+
+	/* ctx kobj stays in queue_ctx */
+	free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx **hctxs;

@@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;
 
 	mutex_lock(&all_q_mutex);
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (!q->mq_ops)
 		blk_free_flush_queue(q->fq);
+	else
+		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
 
-source "drivers/soc/Kconfig"
-
 source "drivers/clk/Kconfig"
 
 source "drivers/hwspinlock/Kconfig"
@@ -1,7 +1,7 @@
 /*
  * ACPI support for Intel Lynxpoint LPSS.
  *
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *

@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER		BIT(2)
 #define LPSS_LTR			BIT(3)
 #define LPSS_SAVE_CTX			BIT(4)
-#define LPSS_DEV_PROXY			BIT(5)
-#define LPSS_PROXY_REQ			BIT(6)
 
 struct lpss_private_data;

@@ -72,10 +70,8 @@ struct lpss_device_desc {
 	void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct device *proxy_device;
-
 static struct lpss_device_desc lpss_dma_desc = {
-	.flags = LPSS_CLK | LPSS_PROXY_REQ,
+	.flags = LPSS_CLK,
 };
 
 struct lpss_private_data {

@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static struct lpss_device_desc byt_uart_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-		 LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
 
 static struct lpss_device_desc byt_spi_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-		 LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x400,
 };
 
 static struct lpss_device_desc byt_sdio_dev_desc = {
-	.flags = LPSS_CLK | LPSS_DEV_PROXY,
+	.flags = LPSS_CLK,
 };
 
 static struct lpss_device_desc byt_i2c_dev_desc = {
-	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+	.flags = LPSS_CLK | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 	.setup = byt_i2c_setup,
 };

@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 	adev->driver_data = pdata;
 	pdev = acpi_create_platform_device(adev);
 	if (!IS_ERR_OR_NULL(pdev)) {
-		if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
-			proxy_device = &pdev->dev;
 		return 1;
 	}

@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_save_ctx(dev, pdata);
 
-	ret = acpi_dev_runtime_suspend(dev);
-	if (ret)
-		return ret;
-
-	if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
-		return pm_runtime_put_sync_suspend(proxy_device);
-
-	return 0;
+	return acpi_dev_runtime_suspend(dev);
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)

@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
 
-	if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
-		ret = pm_runtime_get_sync(proxy_device);
-		if (ret)
-			return ret;
-	}
-
 	ret = acpi_dev_runtime_resume(dev);
 	if (ret)
 		return ret;
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened.  We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-	int counter;
+	int counter = 0;
 
 	if (!rbd_dev->parent_spec)
 		return false;
 
-	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-	if (counter > 0 && rbd_dev->parent_overlap)
-		return true;
-
-	/* Image was flattened, but parent is not yet torn down */
+	down_read(&rbd_dev->header_rwsem);
+	if (rbd_dev->parent_overlap)
+		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+	up_read(&rbd_dev->header_rwsem);
 
 	if (counter < 0)
 		rbd_warn(rbd_dev, "parent reference overflow");
 
-	return false;
+	return counter > 0;
 }
 
 /*

@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 		 */
 		if (rbd_dev->parent_overlap) {
 			rbd_dev->parent_overlap = 0;
-			smp_mb();
 			rbd_dev_parent_put(rbd_dev);
 			pr_info("%s: clone image has been flattened\n",
 				rbd_dev->disk->disk_name);

@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	 * treat it specially.
 	 */
 	rbd_dev->parent_overlap = overlap;
-	smp_mb();
 	if (!overlap) {
 
 		/* A null parent_spec indicates it's the initial probe */

@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
 	struct rbd_image_header	*header;
 
-	/* Drop parent reference unless it's already been done (or none) */
-	if (rbd_dev->parent_overlap)
-		rbd_dev_parent_put(rbd_dev);
+	rbd_dev_parent_put(rbd_dev);
 
 	/* Free dynamic fields from the header, then zero it out */
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
 		client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
 	} else {
 		pdata = dev_get_platdata(&client->dev);
-		if (!pdata || !gpio_is_valid(pdata->base)) {
-			dev_dbg(&client->dev, "invalid platform data\n");
-			return -EINVAL;
+		if (!pdata) {
+			pdata = devm_kzalloc(&client->dev,
+					sizeof(struct mcp23s08_platform_data),
+					GFP_KERNEL);
+			pdata->base = -1;
 		}
 	}

@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
 	} else {
 		type = spi_get_device_id(spi)->driver_data;
 		pdata = dev_get_platdata(&spi->dev);
-		if (!pdata || !gpio_is_valid(pdata->base)) {
-			dev_dbg(&spi->dev,
-					"invalid or missing platform data\n");
-			return -EINVAL;
+		if (!pdata) {
+			pdata = devm_kzalloc(&spi->dev,
+					sizeof(struct mcp23s08_platform_data),
+					GFP_KERNEL);
+			pdata->base = -1;
 		}
 
 		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
@@ -88,6 +88,8 @@ struct gpio_bank {
 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
 #define LINE_USED(line, offset) (line & (BIT(offset)))
 
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
 	return bank->chip.base + gpio_irq;

@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
 	return readl_relaxed(reg) & mask;
 }
 
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+			       unsigned offset)
+{
+	if (!LINE_USED(bank->mod_usage, offset)) {
+		omap_enable_gpio_module(bank, offset);
+		omap_set_gpio_direction(bank, offset, 1);
+	}
+	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_bank *bank = omap_irq_data_get_bank(d);

@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 	spin_lock_irqsave(&bank->lock, flags);
 	offset = GPIO_INDEX(bank, gpio);
 	retval = omap_set_gpio_triggering(bank, offset, type);
-	if (!LINE_USED(bank->mod_usage, offset)) {
-		omap_enable_gpio_module(bank, offset);
-		omap_set_gpio_direction(bank, offset, 1);
-	} else if (!omap_gpio_is_input(bank, BIT(offset))) {
+	omap_gpio_init_irq(bank, gpio, offset);
+	if (!omap_gpio_is_input(bank, BIT(offset))) {
 		spin_unlock_irqrestore(&bank->lock, flags);
 		return -EINVAL;
 	}
-
-	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
 	spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))

@@ -792,6 +800,24 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 	pm_runtime_put(bank->dev);
 }
 
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+	struct gpio_bank *bank = omap_irq_data_get_bank(d);
+	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+	unsigned long flags;
+	unsigned offset = GPIO_INDEX(bank, gpio);
+
+	if (!BANK_USED(bank))
+		pm_runtime_get_sync(bank->dev);
+
+	spin_lock_irqsave(&bank->lock, flags);
+	omap_gpio_init_irq(bank, gpio, offset);
+	spin_unlock_irqrestore(&bank->lock, flags);
+	omap_gpio_unmask_irq(d);
+
+	return 0;
+}
+
 static void omap_gpio_irq_shutdown(struct irq_data *d)
 {
 	struct gpio_bank *bank = omap_irq_data_get_bank(d);

@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
 	if (!irqc)
 		return -ENOMEM;
 
+	irqc->irq_startup = omap_gpio_irq_startup,
 	irqc->irq_shutdown = omap_gpio_irq_shutdown,
 	irqc->irq_ack = omap_gpio_ack_irq,
 	irqc->irq_mask = omap_gpio_mask_irq,
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
 		if (tdev != NULL) {
 			status = sysfs_create_link(&dev->kobj, &tdev->kobj,
 						name);
+			put_device(tdev);
 		} else {
 			status = -ENODEV;
 		}

@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 	}
 
 	status = sysfs_set_active_low(desc, dev, value);
+	put_device(dev);
 unlock:
 	mutex_unlock(&sysfs_lock);
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
 
 #define MQD_SIZE_ALIGNED 768

@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->shared_resources = *gpu_resources;
 
 	/* calculate max size of mqds needed for queues */
-	size = max_num_of_processes *
-		max_num_of_queues_per_process *
-		kfd->device_info->mqd_size_aligned;
+	size = max_num_of_queues_per_device *
+			kfd->device_info->mqd_size_aligned;
 
 	/* add another 512KB for all other allocations on gart */
 	size += 512 * 1024;
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
 		if (retval != 0) {

@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	dqm->queue_count++;
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 	return 0;
 }

@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	if (list_empty(&qpd->queues_list))
 		deallocate_vmid(dqm, qpd, q);
 	dqm->queue_count--;
+
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;

@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
 	for (i = 0; i < pipes_num; i++) {
 		inx = i + first_pipe;
+		/*
+		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
+		 * space in GTT for pipelines we don't initialize
+		 */
 		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
 		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
 		/* = log2(bytes/4)-1 */
-		kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
 				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
 	}

@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
 	pr_debug("kfd: In %s\n", __func__);
 
-	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 	if (retval != 0)
 		return retval;

@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	pr_debug("kfd: In func %s\n", __func__);
 
 	mutex_lock(&dqm->lock);
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;

@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	dqm->queue_count--;
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, false);
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type.
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
 	mutex_unlock(&dqm->lock);
 }

@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		retval = -EPERM;
+		goto out;
+	}
+
 	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
 	if (mqd == NULL) {
 		mutex_unlock(&dqm->lock);

@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		retval = execute_queues_cpsch(dqm, false);
 	}
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;

@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -130,6 +130,7 @@ struct device_queue_manager {
 	struct list_head	queues;
 	unsigned int		processes_count;
 	unsigned int		queue_count;
+	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
 	unsigned int		vmid_bitmap;
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
 	"Kernel cmdline parameter that defines the amdkfd scheduling policy");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
 		  const struct kfd2kgd_calls *f2g,

@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
 	}
 
 	/* Verify module parameters */
-	if ((max_num_of_processes < 0) ||
-		(max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-		pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-		return -1;
-	}
-
-	if ((max_num_of_queues_per_process < 0) ||
-		(max_num_of_queues_per_process >
-			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-		pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+	if ((max_num_of_queues_per_device < 1) ||
+		(max_num_of_queues_per_device >
+			KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+		pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
 		return -1;
 	}
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-	pasid_limit = max_num_of_processes;
+	pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
 	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
 	if (!pasid_bitmap)
@@ -52,20 +52,19 @@
 #define kfd_alloc_struct(ptr_to_struct)	\
 	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
+	(KFD_MAX_NUM_OF_PROCESSES *			\
+			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
					@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
 | 
				
			||||||
	pr_debug("kfd: in %s\n", __func__);
 | 
						pr_debug("kfd: in %s\n", __func__);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	found = find_first_zero_bit(pqm->queue_slot_bitmap,
 | 
						found = find_first_zero_bit(pqm->queue_slot_bitmap,
 | 
				
			||||||
			max_num_of_queues_per_process);
 | 
								KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	pr_debug("kfd: the new slot id %lu\n", found);
 | 
						pr_debug("kfd: the new slot id %lu\n", found);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (found >= max_num_of_queues_per_process) {
 | 
						if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
 | 
				
			||||||
		pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
 | 
							pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
 | 
				
			||||||
				pqm->process->pasid);
 | 
									pqm->process->pasid);
 | 
				
			||||||
		return -ENOMEM;
 | 
							return -ENOMEM;
 | 
				
			||||||
| 
						 | 
					@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	INIT_LIST_HEAD(&pqm->queues);
 | 
						INIT_LIST_HEAD(&pqm->queues);
 | 
				
			||||||
	pqm->queue_slot_bitmap =
 | 
						pqm->queue_slot_bitmap =
 | 
				
			||||||
			kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
 | 
								kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 | 
				
			||||||
					BITS_PER_BYTE), GFP_KERNEL);
 | 
										BITS_PER_BYTE), GFP_KERNEL);
 | 
				
			||||||
	if (pqm->queue_slot_bitmap == NULL)
 | 
						if (pqm->queue_slot_bitmap == NULL)
 | 
				
			||||||
		return -ENOMEM;
 | 
							return -ENOMEM;
 | 
				
			||||||
| 
						 | 
					@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 | 
				
			||||||
		pqn->kq = NULL;
 | 
							pqn->kq = NULL;
 | 
				
			||||||
		retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
 | 
							retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
 | 
				
			||||||
						&q->properties.vmid);
 | 
											&q->properties.vmid);
 | 
				
			||||||
 | 
							pr_debug("DQM returned %d for create_queue\n", retval);
 | 
				
			||||||
		print_queue(q);
 | 
							print_queue(q);
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
	case KFD_QUEUE_TYPE_DIQ:
 | 
						case KFD_QUEUE_TYPE_DIQ:
 | 
				
			||||||
| 
						 | 
@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_err("kfd: error dqm create queue\n");
+		pr_debug("Error dqm create queue\n");
 		goto err_create_queue;
 	}
 
@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
 	kfree(pqn);
 err_allocate_pqn:
+	/* check if queues list is empty unregister process from device */
 	clear_bit(*qid, pqm->queue_slot_bitmap);
+	if (list_empty(&pqm->queues))
+		dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
 	return retval;
 }
 
@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 	BUG_ON(!pqm);
 
 	pqn = get_queue_by_qid(pqm, qid);
-	BUG_ON(!pqn);
+	if (!pqn) {
+		pr_debug("amdkfd: No queue %d exists for update operation\n",
+				qid);
+		return -EFAULT;
+	}
 
 	pqn->q->properties.queue_address = p->queue_address;
 	pqn->q->properties.queue_size = p->queue_size;
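Note: replacing BUG_ON(!pqn) with an -EFAULT return matters because the qid arrives from userspace via ioctl; a bad handle must produce an error, not a kernel panic. A runnable sketch of the defensive-lookup pattern (the 16-entry table is illustrative only, standing in for the process queue manager):

#include <errno.h>
#include <stdio.h>

struct queue { int in_use; };
static struct queue table[16];	/* hypothetical queue table */

/* Validate the untrusted qid and report failure instead of crashing. */
static int update_queue(unsigned int qid)
{
	if (qid >= 16 || !table[qid].in_use)
		return -EFAULT;
	/* ... apply the update here ... */
	return 0;
}

int main(void)
{
	table[3].in_use = 1;
	printf("qid 3: %d, qid 9: %d\n", update_queue(3), update_queue(9));
	return 0;
}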
drivers/gpu/drm/cirrus/cirrus_drv.c:

@ -16,9 +16,12 @@
 #include "cirrus_drv.h"
 
 int cirrus_modeset = -1;
+int cirrus_bpp = 24;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
 
 /*
  * This is the generic driver code. This binds the driver to the drm core,
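Note: with module_param_named(bpp, ...) the limit becomes selectable at load time, e.g. "modprobe cirrus bpp=16", or "cirrus.bpp=16" on the kernel command line when the driver is built in (standard module-parameter syntax; the value 16 is just an example). The 0400 mode makes the parameter read-only through /sys/module/cirrus/parameters/bpp after load.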
drivers/gpu/drm/cirrus/cirrus_drv.h:

@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
 #endif				/* __CIRRUS_DRV_H__ */
drivers/gpu/drm/cirrus/cirrus_main.c:

@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
 	const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
 	const int max_size = cdev->mc.vram_size;
 
+	if (bpp > cirrus_bpp)
+		return false;
 	if (bpp > 32)
 		return false;
 
drivers/gpu/drm/cirrus/cirrus_mode.c:

@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
 	int count;
 
 	/* Just add a static list of modes */
-	count = drm_add_modes_noedid(connector, 1280, 1024);
-	drm_set_preferred_mode(connector, 1024, 768);
+	if (cirrus_bpp <= 24) {
+		count = drm_add_modes_noedid(connector, 1280, 1024);
+		drm_set_preferred_mode(connector, 1024, 768);
+	} else {
+		count = drm_add_modes_noedid(connector, 800, 600);
+		drm_set_preferred_mode(connector, 800, 600);
+	}
 	return count;
 }
 
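Note: the 24 bpp cutoff appears to be about framebuffer size. At 32 bpp a 1280x1024 mode needs 1280 * 1024 * 4 = 5,242,880 bytes, exactly 5 MiB, while the same mode packed at 24 bpp needs about 3.75 MiB; 800x600 at 32 bpp needs only 800 * 600 * 4 = 1,920,000 bytes, under 2 MiB. Advertising only 800x600 when cirrus_bpp > 24 therefore keeps the default mode within the small VRAM typical of emulated Cirrus adapters, and the cirrus_check_framebuffer() hunk above enforces the same limit against cdev->mc.vram_size.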
drivers/gpu/drm/drm_fb_helper.c:

@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+		struct drm_connector *connector)
+{
+	int i, j;
+
+	for (i = 0; i < set->num_connectors; i++) {
+		if (set->connectors[i] == connector)
+			break;
+	}
+
+	if (i == set->num_connectors)
+		return;
+
+	for (j = i + 1; j < set->num_connectors; j++) {
+		set->connectors[j - 1] = set->connectors[j];
+	}
+	set->num_connectors--;
+
+	/* because i915 is pissy about this..
+	 * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+	 */
+	if (set->num_connectors == 0)
+		set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector)
 {
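Note: remove_from_modeset() is a linear scan-and-compact over a small array. A self-contained sketch of the same pattern outside the DRM types:

#include <stdio.h>

/* Remove the first occurrence of `victim`, shifting the tail left one
 * slot and shrinking the count, exactly as remove_from_modeset() does
 * with set->connectors[]. */
static int remove_item(int *arr, int *count, int victim)
{
	int i, j;

	for (i = 0; i < *count; i++)
		if (arr[i] == victim)
			break;
	if (i == *count)
		return -1;	/* not in the set */

	for (j = i + 1; j < *count; j++)
		arr[j - 1] = arr[j];
	(*count)--;
	return 0;
}

int main(void)
{
	int conns[] = { 10, 20, 30 };
	int n = 3;

	remove_item(conns, &n, 20);
	for (int i = 0; i < n; i++)
		printf("%d ", conns[i]);	/* prints: 10 30 */
	printf("\n");
	return 0;
}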
@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 	}
 	fb_helper->connector_count--;
 	kfree(fb_helper_connector);
+
+	/* also cleanup dangling references to the connector: */
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
drivers/gpu/drm/i2c/tda998x_drv.c:

@ -32,6 +32,8 @@
 struct tda998x_priv {
 	struct i2c_client *cec;
 	struct i2c_client *hdmi;
+	struct mutex mutex;
+	struct delayed_work dwork;
 	uint16_t rev;
 	uint8_t current_page;
 	int dpms;
 | 
				
			||||||
	uint8_t addr = REG2ADDR(reg);
 | 
						uint8_t addr = REG2ADDR(reg);
 | 
				
			||||||
	int ret;
 | 
						int ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						mutex_lock(&priv->mutex);
 | 
				
			||||||
	ret = set_page(priv, reg);
 | 
						ret = set_page(priv, reg);
 | 
				
			||||||
	if (ret < 0)
 | 
						if (ret < 0)
 | 
				
			||||||
		return ret;
 | 
							goto out;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ret = i2c_master_send(client, &addr, sizeof(addr));
 | 
						ret = i2c_master_send(client, &addr, sizeof(addr));
 | 
				
			||||||
	if (ret < 0)
 | 
						if (ret < 0)
 | 
				
			||||||
| 
						 | 
					@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 | 
				
			||||||
	if (ret < 0)
 | 
						if (ret < 0)
 | 
				
			||||||
		goto fail;
 | 
							goto fail;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return ret;
 | 
						goto out;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
fail:
 | 
					fail:
 | 
				
			||||||
	dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
 | 
						dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
 | 
				
			||||||
 | 
					out:
 | 
				
			||||||
 | 
						mutex_unlock(&priv->mutex);
 | 
				
			||||||
	return ret;
 | 
						return ret;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
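Note: all four register accessors now follow the single-exit locking idiom: take the mutex once, convert every early return into `goto out`, and unlock at the one exit point, so no path can leave the page-register mutex held. A runnable userspace sketch of the idiom (plain pthreads standing in for the kernel mutex API; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static int current_page = -1;

static int write_reg(int page, int value)
{
	int ret = 0;

	pthread_mutex_lock(&page_lock);
	if (page < 0) {			/* stand-in for set_page() failing */
		ret = -1;
		goto out;		/* not `return`: the lock must drop */
	}
	current_page = page;
	printf("page %d <- 0x%x\n", current_page, value);
out:
	pthread_mutex_unlock(&page_lock);
	return ret;
}

int main(void)
{
	write_reg(1, 0x24);
	write_reg(-1, 0x24);		/* error path still unlocks */
	return 0;
}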
@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
 	buf[0] = REG2ADDR(reg);
 	memcpy(&buf[1], p, cnt);
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, cnt + 1);
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static int
@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
 	reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tda998x_priv *priv =
+			container_of(dwork, struct tda998x_priv, dwork);
+
+	if (priv->encoder && priv->encoder->dev)
+		drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
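Note: tda998x_hpd() recovers its private data with container_of(), walking back from the embedded delayed_work to the enclosing tda998x_priv; deferring the hotplug event by HZ/10 (about 100 ms) also coalesces the burst of HPD interrupts a cable plug can generate. A userspace sketch of the container_of() pattern (the macro is re-implemented here just for the demo):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct priv {
	int id;
	struct work dwork;	/* embedded, like priv->dwork above */
};

static void hpd_handler(struct work *w)
{
	/* The work core hands us only the member; recover the container. */
	struct priv *priv = container_of(w, struct priv, dwork);

	printf("hotplug event for device %d\n", priv->id);
}

int main(void)
{
	struct priv p = { .id = 42 };

	hpd_handler(&p.dwork);
	return 0;
}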
@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
 		priv->wq_edid_wait = 0;
 		wake_up(&priv->wq_edid);
 	} else if (cec != 0) {			/* HPD change */
-		if (priv->encoder && priv->encoder->dev)
-			drm_helper_hpd_irq_event(priv->encoder->dev);
+		schedule_delayed_work(&priv->dwork, HZ/10);
 	}
 	return IRQ_HANDLED;
 }
@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
 	/* disable all IRQs and free the IRQ handler */
 	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
 	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-	if (priv->hdmi->irq)
+	if (priv->hdmi->irq) {
 		free_irq(priv->hdmi->irq, priv);
+		cancel_delayed_work_sync(&priv->dwork);
+	}
 
 	i2c_unregister_device(priv->cec);
 }
@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	struct device_node *np = client->dev.of_node;
 	u32 video;
 	int rev_lo, rev_hi, ret;
+	unsigned short cec_addr;
 
 	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	priv->current_page = 0xff;
 	priv->hdmi = client;
-	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+	cec_addr = 0x34 + (client->addr & 0x03);
+	priv->cec = i2c_new_dummy(client->adapter, cec_addr);
 	if (!priv->cec)
 		return -ENODEV;
 
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
+	mutex_init(&priv->mutex);	/* protect the page access */
+
 	/* wake up the device: */
 	cec_write(priv, REG_CEC_ENAMODS,
 			CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
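Note: the CEC core's I2C address now follows the main TDA998x address, whose low two bits are set by configuration pins per the comment above. As a worked example, a chip strapped to 0x71 gets 0x34 + (0x71 & 0x03) = 0x35 (the 0x71 value is illustrative). This lets two such chips share one bus without both CEC cores colliding at a hard-coded 0x34.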
@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	if (client->irq) {
 		int irqf_trigger;
 
-		/* init read EDID waitqueue */
+		/* init read EDID waitqueue and HDP work */
 		init_waitqueue_head(&priv->wq_edid);
+		INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
 		/* clear pending interrupts */
 		reg_read(priv, REG_INT_FLAGS_0);
Some files were not shown because too many files have changed in this diff.