Merge tag 'drm-misc-next-2021-06-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.14:

UAPI Changes:

 * drm/panfrost: Export AFBC_FEATURES register to userspace

Cross-subsystem Changes:

 * dma-buf: Fix debug printing; Rename dma_resv_*() functions + changes
   in callers; Cleanups

Core Changes:

 * Add prefetching memcpy for WC

 * Avoid circular dependency on CONFIG_FB

 * Cleanups

 * Documentation fixes throughout DRM

 * ttm: Make struct ttm_resource the base of all managers + changes
   in all users of TTM; Add a generic memcpy for page-based iomem; Remove
   use of VM_MIXEDMAP; Cleanups

Driver Changes:

 * drm/bridge: Add TI SN65DSI83 and SN65DSI84 + DT bindings

 * drm/hyperv: Add DRM driver for HyperV graphics output

 * drm/msm: Fix module dependencies

 * drm/panel: KD53T133: Support rotation

 * drm/pl111: Fix module dependencies

 * drm/qxl: Fixes

 * drm/stm: Cleanups

 * drm/sun4i: Be explicit about format modifiers

 * drm/vc4: Use struct gpio_desc; Cleanups

 * drm/vgem: Cleanups

 * drm/vmwgfx: Use ttm_bo_move_null() if there's nothing to copy

 * fbdev/mach64: Cleanups

 * fbdev/mb862xx: Use DEVICE_ATTR_RO

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YMBw3DF2b9udByfT@linux-uq9g

This commit is contained in:
commit 09b020bb05

154 changed files with 3856 additions and 1266 deletions
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml (new file)
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ti,sn65dsi83.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SN65DSI83 and SN65DSI84 DSI to LVDS bridge chip
+
+maintainers:
+  - Marek Vasut <marex@denx.de>
+
+description: |
+  Texas Instruments SN65DSI83 1x Single-link MIPI DSI
+  to 1x Single-link LVDS
+  https://www.ti.com/lit/gpn/sn65dsi83
+  Texas Instruments SN65DSI84 1x Single-link MIPI DSI
+  to 1x Dual-link or 2x Single-link LVDS
+  https://www.ti.com/lit/gpn/sn65dsi84
+
+properties:
+  compatible:
+    enum:
+      - ti,sn65dsi83
+      - ti,sn65dsi84
+
+  reg:
+    enum:
+      - 0x2c
+      - 0x2d
+
+  enable-gpios:
+    maxItems: 1
+    description: GPIO specifier for bridge_en pin (active high).
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for MIPI DSI Channel-A input
+
+        properties:
+          endpoint:
+            $ref: /schemas/media/video-interfaces.yaml#
+            unevaluatedProperties: false
+
+            properties:
+              data-lanes:
+                description: array of physical DSI data lane indexes.
+                minItems: 1
+                maxItems: 4
+                items:
+                  - const: 1
+                  - const: 2
+                  - const: 3
+                  - const: 4
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for MIPI DSI Channel-B input
+
+        properties:
+          endpoint:
+            $ref: /schemas/media/video-interfaces.yaml#
+            unevaluatedProperties: false
+
+            properties:
+              data-lanes:
+                description: array of physical DSI data lane indexes.
+                minItems: 1
+                maxItems: 4
+                items:
+                  - const: 1
+                  - const: 2
+                  - const: 3
+                  - const: 4
+
+      port@2:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for LVDS Channel-A output (panel or bridge).
+
+      port@3:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for LVDS Channel-B output (panel or bridge).
+
+    required:
+      - port@0
+      - port@2
+
+required:
+  - compatible
+  - reg
+  - enable-gpios
+  - ports
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: ti,sn65dsi83
+    then:
+      properties:
+        ports:
+          properties:
+            port@1: false
+            port@3: false
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: ti,sn65dsi84
+    then:
+      properties:
+        ports:
+          properties:
+            port@1: false
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        bridge@2d {
+            compatible = "ti,sn65dsi83";
+            reg = <0x2d>;
+
+            enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+
+                    endpoint {
+                        remote-endpoint = <&dsi0_out>;
+                        data-lanes = <1 2 3 4>;
+                    };
+                };
+
+                port@2 {
+                    reg = <2>;
+
+                    endpoint {
+                        remote-endpoint = <&panel_in_lvds>;
+                    };
+                };
+            };
+        };
+    };
Documentation/driver-api/dma-buf.rst
@@ -178,6 +178,15 @@ DMA Fence Array
 .. kernel-doc:: include/linux/dma-fence-array.h
    :internal:
 
+DMA Fence Chain
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence-chain.c
+   :export:
+
+.. kernel-doc:: include/linux/dma-fence-chain.h
+   :internal:
+
 DMA Fence uABI/Sync File
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
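As a rough orientation for the DMA Fence Chain section added above — this sketch is not part of the diff, and chain_append() is a hypothetical helper name — a driver might append a point to a fence timeline with the documented helpers along these lines:

#include <linux/dma-fence-chain.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: link @fence in behind @prev as timeline point
 * @seqno. dma_fence_chain_init() consumes the references to @prev and
 * @fence; the returned fence is the chain node itself.
 */
static struct dma_fence *chain_append(struct dma_fence *prev,
				      struct dma_fence *fence, u64 seqno)
{
	struct dma_fence_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;

	dma_fence_chain_init(chain, prev, fence, seqno);
	return &chain->base;
}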
Documentation/gpu/drm-mm.rst
@@ -469,8 +469,8 @@ DRM MM Range Allocator Function References
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :export:
 
-DRM Cache Handling
-==================
+DRM Cache Handling and Fast WC memcpy()
+=======================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_cache.c
    :export:
MAINTAINERS
@@ -6084,6 +6084,14 @@ T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/display/hisilicon/
 F:	drivers/gpu/drm/hisilicon/
 
+DRM DRIVER FOR HYPERV SYNTHETIC VIDEO DEVICE
+M:	Deepak Rawat <drawat.floss@gmail.com>
+L:	linux-hyperv@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+F:	drivers/gpu/drm/hyperv
+
 DRM DRIVERS FOR LIMA
 M:	Qiang Yu <yuq825@gmail.com>
 L:	dri-devel@lists.freedesktop.org
drivers/dma-buf/dma-buf.c
@@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		shared_count = fobj->shared_count;
 	else
 		shared_count = 0;
-	fence_excl = rcu_dereference(resv->fence_excl);
+	fence_excl = dma_resv_excl_fence(resv);
 	if (read_seqcount_retry(&resv->seq, seq)) {
 		rcu_read_unlock();
 		goto retry;
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout_rcu(resv, write, true,
-					  MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
@@ -1349,15 +1348,14 @@ EXPORT_SYMBOL_GPL(dma_buf_vunmap);
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
-	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
 	struct dma_resv *robj;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
-	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
+	int ret;
 
 	ret = mutex_lock_interruptible(&db_list.lock);
 
@@ -1383,33 +1381,24 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		while (true) {
-			seq = read_seqcount_begin(&robj->seq);
-			rcu_read_lock();
-			fobj = rcu_dereference(robj->fence);
-			shared_count = fobj ? fobj->shared_count : 0;
-			fence = rcu_dereference(robj->fence_excl);
-			if (!read_seqcount_retry(&robj->seq, seq))
-				break;
-			rcu_read_unlock();
-		}
-
+		fence = dma_resv_excl_fence(robj);
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
 
+		fobj = rcu_dereference_protected(robj->fence,
+						 dma_resv_held(robj));
+		shared_count = fobj ? fobj->shared_count : 0;
 		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference(fobj->shared[i]);
-			if (!dma_fence_get_rcu(fence))
-				continue;
+			fence = rcu_dereference_protected(fobj->shared[i],
+							  dma_resv_held(robj));
 			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
-			dma_fence_put(fence);
 		}
-		rcu_read_unlock();
 
 		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
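For context on the renames above: dma_resv_excl_fence() and dma_resv_shared_list() replace the open-coded rcu_dereference() calls on resv->fence_excl and resv->fence. A paraphrased sketch of the accessors as they look in include/linux/dma-resv.h around this series (the header is authoritative; this is not part of the diff):

static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
{
	/* Legal both under RCU and with the reservation lock held. */
	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}

static inline struct dma_resv_list *
dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}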
drivers/dma-buf/dma-resv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
  *
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	kfree_rcu(list, rcu);
 }
 
-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
-	struct mm_struct *mm = mm_alloc();
-	struct ww_acquire_ctx ctx;
-	struct dma_resv obj;
-	struct address_space mapping;
-	int ret;
-
-	if (!mm)
-		return -ENOMEM;
-
-	dma_resv_init(&obj);
-	address_space_init_once(&mapping);
-
-	mmap_read_lock(mm);
-	ww_acquire_init(&ctx, &reservation_ww_class);
-	ret = dma_resv_lock(&obj, &ctx);
-	if (ret == -EDEADLK)
-		dma_resv_lock_slow(&obj, &ctx);
-	fs_reclaim_acquire(GFP_KERNEL);
-	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
-	i_mmap_lock_write(&mapping);
-	i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
-	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-	__dma_fence_might_wait();
-	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
-	__dma_fence_might_wait();
-#endif
-	fs_reclaim_release(GFP_KERNEL);
-	ww_mutex_unlock(&obj.lock);
-	ww_acquire_fini(&ctx);
-	mmap_read_unlock(mm);
-
-	mmput(mm);
-
-	return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
@@ -191,14 +149,11 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
-
+	old = dma_resv_shared_list(obj);
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
-		else
-			max = max(old->shared_count + num_fences,
-				  old->shared_max * 2);
+		max = max(old->shared_count + num_fences, old->shared_max * 2);
 	} else {
 		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
@@ -252,6 +207,28 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 }
 EXPORT_SYMBOL(dma_resv_reserve_shared);
 
+#ifdef CONFIG_DEBUG_MUTEXES
+/**
+ * dma_resv_reset_shared_max - reset shared fences for debugging
+ * @obj: the dma_resv object to reset
+ *
+ * Reset the number of pre-reserved shared slots to test that drivers do
+ * correct slot allocation using dma_resv_reserve_shared(). See also
+ * &dma_resv_list.shared_max.
+ */
+void dma_resv_reset_shared_max(struct dma_resv *obj)
+{
+	struct dma_resv_list *fences = dma_resv_shared_list(obj);
+
+	dma_resv_assert_held(obj);
+
+	/* Test shared fence slot reservation */
+	if (fences)
+		fences->shared_max = fences->shared_count;
+}
+EXPORT_SYMBOL(dma_resv_reset_shared_max);
+#endif
+
 /**
  * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
@@ -270,7 +247,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	fobj = dma_resv_get_list(obj);
+	fobj = dma_resv_shared_list(obj);
 	count = fobj->shared_count;
 
 	write_seqcount_begin(&obj->seq);
@@ -307,13 +284,13 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
  */
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
 	struct dma_resv_list *old;
 	u32 i = 0;
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
+	old = dma_resv_shared_list(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -337,26 +314,26 @@ EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
  * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
  * @src: the source reservation object
  *
  * Copy all fences from src to dst. dst-lock must be held.
  */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	unsigned i;
+	unsigned int i;
 
 	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
-	src_list = rcu_dereference(src->fence);
+	src_list = dma_resv_shared_list(src);
 
 retry:
 	if (src_list) {
-		unsigned shared_count = src_list->shared_count;
+		unsigned int shared_count = src_list->shared_count;
 
 		rcu_read_unlock();
 
@@ -365,7 +342,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		src_list = rcu_dereference(src->fence);
+		src_list = dma_resv_shared_list(src);
 		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
@@ -373,6 +350,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 		dst_list->shared_count = 0;
 		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence __rcu **dst;
 			struct dma_fence *fence;
 
 			fence = rcu_dereference(src_list->shared[i]);
@@ -382,7 +360,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 			if (!dma_fence_get_rcu(fence)) {
 				dma_resv_list_free(dst_list);
-				src_list = rcu_dereference(src->fence);
+				src_list = dma_resv_shared_list(src);
 				goto retry;
 			}
 
@@ -391,7 +369,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 				continue;
 			}
 
-			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+			dst = &dst_list->shared[dst_list->shared_count++];
+			rcu_assign_pointer(*dst, fence);
 		}
 	} else {
 		dst_list = NULL;
@@ -400,8 +379,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	src_list = dma_resv_get_list(dst);
-	old = dma_resv_get_excl(dst);
+	src_list = dma_resv_shared_list(dst);
+	old = dma_resv_excl_fence(dst);
 
 	write_seqcount_begin(&dst->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */
@@ -417,7 +396,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
 EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -429,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned int *pshared_count,
+			struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -449,11 +427,11 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 		rcu_read_lock();
 		seq = read_seqcount_begin(&obj->seq);
 
-		fence_excl = rcu_dereference(obj->fence_excl);
+		fence_excl = dma_resv_excl_fence(obj);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
-		fobj = rcu_dereference(obj->fence);
+		fobj = dma_resv_shared_list(obj);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -515,27 +493,28 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 
 /**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
+ * Callers are not required to hold specific locks, but maybe hold
+ * dma_resv_lock() already
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zer on success.
 */
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
-			       bool wait_all, bool intr,
-			       unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout)
 {
-	struct dma_fence *fence;
-	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
+	unsigned int seq, shared_count;
+	struct dma_fence *fence;
 	int i;
 
 retry:
@@ -544,7 +523,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	rcu_read_lock();
 	i = -1;
 
-	fence = rcu_dereference(obj->fence_excl);
+	fence = dma_resv_excl_fence(obj);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -559,14 +538,15 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	}
 
 	if (wait_all) {
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
 		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence;
 
+			lfence = rcu_dereference(fobj->shared[i]);
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 				     &lfence->flags))
 				continue;
@@ -602,7 +582,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 
 
 static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
@@ -622,18 +602,20 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 }
 
 /**
- * dma_resv_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
  * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
+ * Callers are not required to hold specific locks, but maybe hold
+ * dma_resv_lock() already
 * RETURNS
 * true if all fences signaled, else false
 */
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
-	unsigned seq, shared_count;
+	unsigned int seq, shared_count;
 	int ret;
 
 	rcu_read_lock();
@@ -643,16 +625,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	seq = read_seqcount_begin(&obj->seq);
 
 	if (test_all) {
-		unsigned i;
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
+		unsigned int i;
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence;
 
+			fence = rcu_dereference(fobj->shared[i]);
 			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
@@ -665,7 +647,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	}
 
 	if (!shared_count) {
-		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+		struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
 
 		if (fence_excl) {
 			ret = dma_resv_test_signaled_single(fence_excl);
@@ -680,4 +662,47 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+	struct mm_struct *mm = mm_alloc();
+	struct ww_acquire_ctx ctx;
+	struct dma_resv obj;
+	struct address_space mapping;
+	int ret;
+
+	if (!mm)
+		return -ENOMEM;
+
+	dma_resv_init(&obj);
+	address_space_init_once(&mapping);
+
+	mmap_read_lock(mm);
+	ww_acquire_init(&ctx, &reservation_ww_class);
+	ret = dma_resv_lock(&obj, &ctx);
+	if (ret == -EDEADLK)
+		dma_resv_lock_slow(&obj, &ctx);
+	fs_reclaim_acquire(GFP_KERNEL);
+	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+	i_mmap_lock_write(&mapping);
+	i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+	__dma_fence_might_wait();
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+	__dma_fence_might_wait();
+#endif
+	fs_reclaim_release(GFP_KERNEL);
+	ww_mutex_unlock(&obj.lock);
+	ww_acquire_fini(&ctx);
+	mmap_read_unlock(mm);
+
+	mmput(mm);
+
+	return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
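Summing up the dma-resv renames, which the driver patches below apply mechanically: dma_resv_get_excl() becomes dma_resv_excl_fence(), dma_resv_get_list() becomes dma_resv_shared_list(), dma_resv_get_fences_rcu() becomes dma_resv_get_fences(), dma_resv_wait_timeout_rcu() becomes dma_resv_wait_timeout(), and dma_resv_test_signaled_rcu() becomes dma_resv_test_signaled(). A hypothetical caller using the new names might look like this (wait_for_bo_idle() is illustrative only, not from this diff):

static long wait_for_bo_idle(struct dma_resv *resv)
{
	long ret;

	/*
	 * Wait interruptibly on all fences, shared and exclusive; as the
	 * updated kerneldoc notes, no specific lock needs to be held.
	 */
	ret = dma_resv_wait_timeout(resv, true, true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;	/* e.g. -ERESTARTSYS */

	return dma_resv_test_signaled(resv, true) ? 0 : -EBUSY;
}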
drivers/gpu/drm/Kconfig
@@ -98,8 +98,8 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
 	depends on DRM
+	depends on FB
 	select DRM_KMS_HELPER
-	select FB
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -379,6 +379,19 @@ source "drivers/gpu/drm/xlnx/Kconfig"
 
 source "drivers/gpu/drm/gud/Kconfig"
 
+config DRM_HYPERV
+	tristate "DRM Support for Hyper-V synthetic video device"
+	depends on DRM && PCI && MMU && HYPERV
+	select DRM_KMS_HELPER
+	select DRM_GEM_SHMEM_HELPER
+	help
+	 This is a KMS driver for Hyper-V synthetic video device. Choose this
+	 option if you would like to enable drm driver for Hyper-V virtual
+	 machine. Unselect Hyper-V framebuffer driver (CONFIG_FB_HYPERV) so
+	 that DRM driver is used by default.
+
+	 If M is selected the module will be called hyperv_drm.
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
drivers/gpu/drm/Makefile
@@ -126,3 +126,4 @@ obj-$(CONFIG_DRM_MCDE) += mcde/
 obj-$(CONFIG_DRM_TIDSS) += tidss/
 obj-y			+= xlnx/
 obj-y			+= gud/
+obj-$(CONFIG_DRM_HYPERV) += hyperv/
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	if (!ef)
 		return -EINVAL;
 
-	old = dma_resv_get_list(resv);
+	old = dma_resv_shared_list(resv);
 	if (!old)
 		return 0;
 
@@ -1668,7 +1668,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 * the next restore worker
 	 */
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
-	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
 	ret = vm_validate_pt_pd_bos(avm);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4127,9 +4127,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
 
 		/* No need to recover an evicted BO */
-		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
-		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
-		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
 			continue;
 
 		r = amdgpu_bo_restore_shadow(shadow, &next);
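The bo->tbo.mem to bo->tbo.resource rewrites in this and the following amdgpu files track the TTM change named in the tag ("Make struct ttm_resource the base of all managers"): the buffer object's placement is no longer embedded in the BO but allocated by the resource manager and referenced through a pointer, roughly (paraphrased, not part of this diff):

struct ttm_buffer_object {
	/* ... */
	struct ttm_resource *resource;	/* was: struct ttm_resource mem; */
	/* ... */
};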
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
-				    &work->shared_count,
-				    &work->shared);
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+				&work->shared_count, &work->shared);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to get fences for buffer\n");
 		goto unpin;
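A behavioural note on dma_resv_get_fences(), unchanged by the rename: it allocates the *pshared array and returns one reference per fence, so callers such as the page-flip code above must drop them later. A hypothetical cleanup helper (put_fences() is illustrative, not from this diff):

#include <linux/dma-fence.h>
#include <linux/slab.h>

static void put_fences(struct dma_fence **fences, unsigned int count)
{
	unsigned int i;

	/* Drop the references handed out by dma_resv_get_fences(). */
	for (i = 0; i < count; ++i)
		dma_fence_put(fences[i]);
	kfree(fences);
}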
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -49,10 +49,10 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	unsigned int count;
 	int r;
 
-	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
 		return 0;
 
-	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences(obj, NULL, &count, &fences);
 	if (r)
 		return r;
 
@@ -226,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		if (r)
 			return ERR_PTR(r);
 
-	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
 		     AMDGPU_GEM_DOMAIN_GTT)) {
 		return ERR_PTR(-EBUSY);
 	}
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->tbo.ttm->pages,
@@ -245,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		break;
 
 	case TTM_PL_VRAM:
-		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
-			bo->tbo.base.size, attach->dev, dir, &sgt);
+		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+					      bo->tbo.base.size, attach->dev,
+					      dir, &sgt);
 		if (r)
 			return ERR_PTR(r);
 		break;
@@ -436,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 	struct amdgpu_vm_bo_base *bo_base;
 	int r;
 
-	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		return;
 
 	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
 | 
				
			||||||
	if (!amdgpu_vm_ready(vm))
 | 
						if (!amdgpu_vm_ready(vm))
 | 
				
			||||||
		goto out_unlock;
 | 
							goto out_unlock;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	fence = dma_resv_get_excl(bo->tbo.base.resv);
 | 
						fence = dma_resv_excl_fence(bo->tbo.base.resv);
 | 
				
			||||||
	if (fence) {
 | 
						if (fence) {
 | 
				
			||||||
		amdgpu_bo_fence(bo, fence, true);
 | 
							amdgpu_bo_fence(bo, fence, true);
 | 
				
			||||||
		fence = NULL;
 | 
							fence = NULL;
 | 
				
			||||||
| 
						 | 
					@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 | 
				
			||||||
		return -ENOENT;
 | 
							return -ENOENT;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	robj = gem_to_amdgpu_bo(gobj);
 | 
						robj = gem_to_amdgpu_bo(gobj);
 | 
				
			||||||
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
 | 
						ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
 | 
				
			||||||
						  timeout);
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* ret == 0 means not signaled,
 | 
						/* ret == 0 means not signaled,
 | 
				
			||||||
	 * ret > 0 means signaled
 | 
						 * ret > 0 means signaled
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
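These dma_resv changes are the cross-subsystem renames from the tag: the _rcu suffixes are dropped and the accessor names clarified, with call sites reflowed where the shorter names now fit on one line. As a rough cheat sheet, the old-to-new mapping could be written as compat shims like the following (hypothetical, for out-of-tree callers; not part of the series):

/* Hypothetical compat shims mapping the old dma-resv names to the new
 * ones introduced by this merge; the kernel itself just renames. */
#define dma_resv_get_excl		dma_resv_excl_fence
#define dma_resv_get_list		dma_resv_shared_list
#define dma_resv_get_fences_rcu		dma_resv_get_fences
#define dma_resv_wait_timeout_rcu	dma_resv_wait_timeout
#define dma_resv_test_signaled_rcu	dma_resv_test_signaled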
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -101,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		*addr = bo->tbo.ttm->dma_address[0];
 		break;
@@ -112,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 		*addr = 0;
 		break;
 	}
-	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
 	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
 }
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,18 +22,26 @@
  * Authors: Christian König
  */
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 
+struct amdgpu_gtt_node {
+	struct ttm_buffer_object *tbo;
+	struct ttm_range_mgr_node base;
+};
+
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
 	return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-struct amdgpu_gtt_node {
-	struct drm_mm_node node;
-	struct ttm_buffer_object *tbo;
-};
+static inline struct amdgpu_gtt_node *
+to_amdgpu_gtt_node(struct ttm_resource *res)
+{
+	return container_of(res, struct amdgpu_gtt_node, base.base);
+}
 
 /**
  * DOC: mem_info_gtt_total
@@ -93,13 +101,15 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
 /**
  * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
- * @mem: the mem object to check
+ * @res: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	return mem->mm_node != NULL;
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+
+	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
 
 /**
@@ -115,54 +125,57 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 			      struct ttm_buffer_object *tbo,
 			      const struct ttm_place *place,
-			      struct ttm_resource *mem)
+			      struct ttm_resource **res)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	uint32_t num_pages = PFN_UP(tbo->base.size);
 	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
-	    atomic64_read(&mgr->available) < mem->num_pages) {
+	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+	    atomic64_read(&mgr->available) < num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
 	}
-	atomic64_sub(mem->num_pages, &mgr->available);
+	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
-	if (!place->lpfn) {
-		mem->mm_node = NULL;
-		mem->start = AMDGPU_BO_INVALID_OFFSET;
-		return 0;
-	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
 	node->tbo = tbo;
+	ttm_resource_init(tbo, place, &node->base.base);
 
-	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-					tbo->page_alignment, 0, place->fpfn,
-					place->lpfn, DRM_MM_INSERT_BEST);
-	spin_unlock(&mgr->lock);
-
-	if (unlikely(r))
-		goto err_free;
-
-	mem->mm_node = node;
-	mem->start = node->node.start;
+	if (place->lpfn) {
+		spin_lock(&mgr->lock);
+		r = drm_mm_insert_node_in_range(&mgr->mm,
+						&node->base.mm_nodes[0],
+						num_pages, tbo->page_alignment,
+						0, place->fpfn, place->lpfn,
+						DRM_MM_INSERT_BEST);
+		spin_unlock(&mgr->lock);
+		if (unlikely(r))
+			goto err_free;
+
+		node->base.base.start = node->base.mm_nodes[0].start;
+	} else {
+		node->base.mm_nodes[0].start = 0;
+		node->base.mm_nodes[0].size = node->base.base.num_pages;
+		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+	}
 
+	*res = &node->base.base;
 	return 0;
 
 err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -176,19 +189,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated GTT again.
  */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	if (node) {
-		spin_lock(&mgr->lock);
-		drm_mm_remove_node(&node->node);
-		spin_unlock(&mgr->lock);
-		kfree(node);
-	}
+	spin_lock(&mgr->lock);
+	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+		drm_mm_remove_node(&node->base.mm_nodes[0]);
+	spin_unlock(&mgr->lock);
+	atomic64_add(res->num_pages, &mgr->available);
 
-	atomic64_add(mem->num_pages, &mgr->available);
+	kfree(node);
 }
 
 /**
@@ -224,7 +236,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
 		r = amdgpu_ttm_recover_gart(node->tbo);
 		if (r)
 			break;
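The key structural change here: the driver's GTT node now embeds a ttm_range_mgr_node, whose own base is the ttm_resource handed to TTM, so to_amdgpu_gtt_node() can climb back from a bare resource pointer with container_of(). A standalone model of that layering, using reduced stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_resource { unsigned long start; };
struct drm_mm_node { unsigned long start, size; };

struct ttm_range_mgr_node {
	struct ttm_resource base;
	struct drm_mm_node mm_nodes[1];	/* flexible array in the real struct */
};

struct amdgpu_gtt_node {
	void *tbo;
	struct ttm_range_mgr_node base;
};

static struct amdgpu_gtt_node *to_amdgpu_gtt_node(struct ttm_resource *res)
{
	/* two embedded levels deep: res is base.base of the driver node */
	return container_of(res, struct amdgpu_gtt_node, base.base);
}

int main(void)
{
	struct amdgpu_gtt_node node = { .base.base.start = 42 };
	struct ttm_resource *res = &node.base.base;

	/* round-trip: resource pointer back to the enclosing driver node */
	printf("%d\n", to_amdgpu_gtt_node(res) == &node);
	return 0;
}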
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -156,8 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout_rcu(resv, true, false,
-				    MAX_SCHEDULE_TIMEOUT);
+	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -364,14 +364,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 	if (cpu_addr)
 		amdgpu_bo_kunmap(*bo_ptr);
 
-	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
 
 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 	}
 	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-			     &(*bo_ptr)->tbo.mem, &ctx);
+			     &(*bo_ptr)->tbo.resource, &ctx);
 	if (r)
 		goto error;
 
@@ -575,15 +575,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;
 
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -777,12 +777,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
-						MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 	if (r)
 		return r;
 
@@ -905,8 +905,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
 	if (bo->tbo.pin_count) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
-		uint32_t mem_flags = bo->tbo.mem.placement;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
+		uint32_t mem_flags = bo->tbo.resource->placement;
 
 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
@@ -956,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	ttm_bo_pin(&bo->tbo);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -1008,11 +1008,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	if (bo->tbo.base.import_attach)
 		dma_buf_unpin(bo->tbo.base.import_attach);
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
 			     &adev->visible_pin_size);
-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
 	}
 }
@@ -1245,7 +1245,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 
 	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return;
@@ -1256,7 +1256,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	amdgpu_bo_kunmap(abo);
 
 	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-	    bo->mem.mem_type != TTM_PL_SYSTEM)
+	    bo->resource->mem_type != TTM_PL_SYSTEM)
 		dma_buf_move_notify(abo->tbo.base.dma_buf);
 
 	/* remember the eviction */
@@ -1276,7 +1276,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 {
 	unsigned int domain;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		*vram_mem += amdgpu_bo_size(bo);
@@ -1318,7 +1318,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
 
@@ -1355,10 +1355,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
 		return 0;
 
@@ -1381,9 +1381,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	else if (unlikely(r))
 		return VM_FAULT_SIGBUS;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	/* this should never happen */
-	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	if (bo->resource->mem_type == TTM_PL_VRAM &&
 	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
 		return VM_FAULT_SIGBUS;
 
@@ -1468,11 +1468,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
  */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
-	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
 	return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1490,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	uint64_t offset;
 
-	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
-		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
 	return amdgpu_gmc_sign_extend(offset);
 }
@@ -1544,7 +1544,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	unsigned int pin_count;
 	u64 size;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		placement = "VRAM";
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -223,10 +223,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_res_cursor cursor;
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
 		return false;
 
-	amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 	while (cursor.remaining) {
 		if (cursor.start < adev->gmc.visible_vram_size)
 			return true;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -66,14 +66,18 @@ static DEVICE_ATTR_RO(mem_info_preempt_used);
 static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *tbo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 
-	atomic64_add(mem->num_pages, &mgr->used);
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (*res)
+		return -ENOMEM;
 
-	mem->mm_node = NULL;
-	mem->start = AMDGPU_BO_INVALID_OFFSET;
+	ttm_resource_init(tbo, place, *res);
+	(*res)->start = AMDGPU_BO_INVALID_OFFSET;
+
+	atomic64_add((*res)->num_pages, &mgr->used);
 	return 0;
 }
 
@@ -86,11 +90,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated GTT again.
  */
 static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 
-	atomic64_sub(mem->num_pages, &mgr->used);
+	atomic64_sub(res->num_pages, &mgr->used);
+	kfree(res);
 }
 
 /**
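With struct ttm_resource as the base of all managers, the allocation contract flips: the manager allocates (and later frees) the resource itself and returns it through a double pointer, instead of filling caller-owned storage. One caveat on the hunk above: the merged NULL check reads if (*res), where the conventional allocation test would be if (!*res). A standalone model of the new contract, with stand-in types and the conventional check:

#include <stdlib.h>

struct ttm_resource { unsigned long start, num_pages; };

static int preempt_mgr_new(unsigned long num_pages, struct ttm_resource **res)
{
	*res = calloc(1, sizeof(**res));
	if (!*res)
		return -1;	/* -ENOMEM in the kernel */

	(*res)->num_pages = num_pages;
	(*res)->start = ~0ul;	/* AMDGPU_BO_INVALID_OFFSET stand-in */
	return 0;
}

static void preempt_mgr_del(struct ttm_resource *res)
{
	free(res);	/* the manager that allocated it also frees it */
}

int main(void)
{
	struct ttm_resource *res;

	if (preempt_mgr_new(16, &res) == 0)
		preempt_mgr_del(res);
	return 0;
}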
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
 
 #include <drm/drm_mm.h>
 #include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
 	struct drm_mm_node *node;
 
-	if (!res || !res->mm_node) {
+	if (!res) {
 		cur->start = start;
 		cur->size = size;
 		cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 
 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-	node = res->mm_node;
+	node = to_ttm_range_mgr_node(res)->mm_nodes;
 	while (start >= node->size << PAGE_SHIFT)
 		start -= node++->size << PAGE_SHIFT;
 
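amdgpu_res_first() still seeks into a possibly multi-node range, but now derives the node array from the resource via to_ttm_range_mgr_node() instead of a raw mm_node pointer. A standalone model of the seek loop, with stand-in types and a hypothetical res_first() helper:

#include <stdio.h>

#define PAGE_SHIFT 12

struct drm_mm_node { unsigned long start, size; };	/* size in pages */

static void res_first(const struct drm_mm_node *node, unsigned long start,
		      const struct drm_mm_node **out, unsigned long *offset)
{
	/* skip whole nodes until the byte offset falls inside one */
	while (start >= node->size << PAGE_SHIFT)
		start -= node++->size << PAGE_SHIFT;

	*out = node;
	*offset = start;	/* remaining byte offset inside *out */
}

int main(void)
{
	const struct drm_mm_node nodes[] = { { 0, 2 }, { 8, 4 } };
	const struct drm_mm_node *n;
	unsigned long off;

	res_first(nodes, 3ul << PAGE_SHIFT, &n, &off);	/* lands in node 1 */
	printf("node start=%lu pages, offset=%lu bytes\n", n->start, off);
	return 0;
}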
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,10 +210,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = dma_resv_get_excl(resv);
+	f = dma_resv_excl_fence(resv);
 	r = amdgpu_sync_fence(sync, f);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared_list(resv);
 	if (!flist || r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.mem.num_pages;
-			   __entry->type = bo->tbo.mem.mem_type;
+			   __entry->pages = bo->tbo.resource->num_pages;
+			   __entry->type = bo->tbo.resource->mem_type;
 			   __entry->prefer = bo->preferred_domains;
 			   __entry->allow = bo->allowed_domains;
 			   __entry->visible = bo->flags;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -45,6 +45,7 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 #include <drm/amdgpu_drm.h>
 
@@ -125,7 +126,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		rcu_read_unlock();
 		return;
 	}
-	switch (bo->mem.mem_type) {
+
+	switch (bo->resource->mem_type) {
 	case AMDGPU_PL_GDS:
 	case AMDGPU_PL_GWS:
 	case AMDGPU_PL_OA:
@@ -460,7 +462,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 	int r;
 
 	if (new_mem->mem_type == TTM_PL_TT ||
@@ -495,7 +497,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 			return r;
 
 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		goto out;
 	}
@@ -605,7 +607,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_res_cursor cursor;
 
-	amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+			 &cursor);
 	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
 }
 
@@ -954,50 +957,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	struct ttm_resource tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
+	struct ttm_resource *tmp;
 	uint64_t addr, flags;
 	int r;
 
-	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
 		return 0;
 
 	addr = amdgpu_gmc_agp_addr(bo);
 	if (addr != AMDGPU_BO_INVALID_OFFSET) {
-		bo->mem.start = addr >> PAGE_SHIFT;
-	} else {
-
-		/* allocate GART space */
-		placement.num_placement = 1;
-		placement.placement = &placements;
-		placement.num_busy_placement = 1;
-		placement.busy_placement = &placements;
-		placements.fpfn = 0;
-		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-		placements.mem_type = TTM_PL_TT;
-		placements.flags = bo->mem.placement;
-
-		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
-		if (unlikely(r))
-			return r;
-
-		/* compute PTE flags for this buffer object */
-		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
-
-		/* Bind pages */
-		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
-		r = amdgpu_ttm_gart_bind(adev, bo, flags);
-		if (unlikely(r)) {
-			ttm_resource_free(bo, &tmp);
-			return r;
-		}
-
-		amdgpu_gart_invalidate_tlb(adev);
-		ttm_resource_free(bo, &bo->mem);
-		bo->mem = tmp;
+		bo->resource->start = addr >> PAGE_SHIFT;
+		return 0;
 	}
 
+	/* allocate GART space */
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = bo->resource->placement;
+
+	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+	if (unlikely(r))
+		return r;
+
+	/* compute PTE flags for this buffer object */
+	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
+
+	/* Bind pages */
+	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+	r = amdgpu_ttm_gart_bind(adev, bo, flags);
+	if (unlikely(r)) {
+		ttm_resource_free(bo, &tmp);
+		return r;
+	}
+
+	amdgpu_gart_invalidate_tlb(adev);
+	ttm_resource_free(bo, &bo->resource);
+	ttm_bo_assign_mem(bo, tmp);
+
 	return 0;
 }
 
@@ -1016,7 +1019,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 	if (!tbo->ttm)
 		return 0;
 
-	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
 	r = amdgpu_ttm_gart_bind(adev, tbo, flags);
 
 	return r;
@@ -1330,12 +1333,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 					    const struct ttm_place *place)
 {
-	unsigned long num_pages = bo->mem.num_pages;
+	unsigned long num_pages = bo->resource->num_pages;
 	struct amdgpu_res_cursor cursor;
 	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	int i;
 
+	/* Swapout? */
+	if (bo->resource->mem_type == TTM_PL_SYSTEM)
+		return true;
+
 	if (bo->type == ttm_bo_type_kernel &&
 	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
 		return false;
@@ -1344,7 +1351,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = dma_resv_get_list(bo->base.resv);
+	flist = dma_resv_shared_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
@@ -1354,7 +1361,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 		}
 	}
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case AMDGPU_PL_PREEMPT:
 		/* Preemptible BOs don't own system resources managed by the
 		 * driver (pages, VRAM, GART space). They point to resources
@@ -1372,7 +1379,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 
 	case TTM_PL_VRAM:
 		/* Check each drm MM node individually */
-		amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+		amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
 				 &cursor);
 		while (cursor.remaining) {
 			if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1414,10 +1421,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 	uint32_t value = 0;
 	int ret = 0;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return -EIO;
 
-	amdgpu_res_first(&bo->mem, offset, len, &cursor);
+	amdgpu_res_first(bo->resource, offset, len, &cursor);
 	while (cursor.remaining) {
 		uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
 		uint64_t bytes = 4 - (cursor.start & 3);
@@ -1942,21 +1949,21 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		return -EINVAL;
 	}
 
-	if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
+	if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
 		DRM_ERROR("Trying to clear preemptible memory.\n");
 		return -EINVAL;
 	}
 
-	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 		if (r)
 			return r;
 	}
 
-	num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+	num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
 	num_loops = 0;
 
-	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 	while (cursor.remaining) {
 		num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
 		amdgpu_res_next(&cursor, cursor.size);
@@ -1981,12 +1988,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		}
 	}
 
-	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 	while (cursor.remaining) {
 		uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
 		uint64_t dst_addr = cursor.start;
 
-		dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+		dst_addr += amdgpu_ttm_domain_start(adev,
+						    bo->tbo.resource->mem_type);
 		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
 					cur_size);
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1125,9 +1125,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-						true, false,
-						msecs_to_jiffies(10));
+		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
 		if (r < 0)
					@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 | 
				
			||||||
		amdgpu_vm_bo_idle(base);
 | 
							amdgpu_vm_bo_idle(base);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (bo->preferred_domains &
 | 
						if (bo->preferred_domains &
 | 
				
			||||||
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
 | 
						    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 | 
				
			||||||
		return;
 | 
							return;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
| 
						 | 
					@ -658,10 +658,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 | 
				
			||||||
		if (!bo->parent)
 | 
							if (!bo->parent)
 | 
				
			||||||
			continue;
 | 
								continue;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
 | 
							ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
 | 
				
			||||||
					&vm->lru_bulk_move);
 | 
										&vm->lru_bulk_move);
 | 
				
			||||||
		if (shadow)
 | 
							if (shadow)
 | 
				
			||||||
			ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
 | 
								ttm_bo_move_to_lru_tail(&shadow->tbo,
 | 
				
			||||||
 | 
											shadow->tbo.resource,
 | 
				
			||||||
						&vm->lru_bulk_move);
 | 
											&vm->lru_bulk_move);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	spin_unlock(&adev->mman.bdev.lru_lock);
 | 
						spin_unlock(&adev->mman.bdev.lru_lock);
 | 
				
			||||||
| 
						 | 
					@ -1858,10 +1859,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 | 
				
			||||||
			struct drm_gem_object *gobj = dma_buf->priv;
 | 
								struct drm_gem_object *gobj = dma_buf->priv;
 | 
				
			||||||
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 | 
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-			if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
 				bo = gem_to_amdgpu_bo(gobj);
 		}
-		mem = &bo->tbo.mem;
+		mem = bo->tbo.resource;
 		if (mem->mem_type == TTM_PL_TT ||
 		    mem->mem_type == AMDGPU_PL_PREEMPT)
 			pages_addr = bo->tbo.ttm->dma_address;
@@ -1922,7 +1923,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * next command submission.
 	 */
 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
 		      amdgpu_mem_type_to_domain(mem_type)))
@@ -2063,13 +2064,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	unsigned i, shared_count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, &excl,
-				    &shared_count, &shared);
+	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		dma_resv_wait_timeout_rcu(resv, true, false,
-					  MAX_SCHEDULE_TIMEOUT);
+		dma_resv_wait_timeout(resv, true, false,
+				      MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -2681,7 +2681,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2861,8 +2861,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					    true, true, timeout);
+	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+					true, timeout);
 	if (timeout <= 0)
 		return timeout;
 
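The amdgpu_vm.c hunks above are the caller side of the dma-buf rename listed in the tag: dma_resv_get_fences_rcu(), dma_resv_wait_timeout_rcu() and dma_resv_test_signaled_rcu() lose their _rcu suffixes with unchanged semantics. A minimal caller sketch; the wait_for_bo_idle() helper is hypothetical and not part of this series:

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Hypothetical helper, for illustration only: block until all fences on
 * a reservation object signal, using the renamed API. Returns remaining
 * jiffies (> 0) on success, 0 on timeout, negative errno on error. */
static long wait_for_bo_idle(struct dma_resv *resv)
{
	/* wait_all = true, intr = false, as in amdgpu_vm_prt_fini() above */
	return dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
}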
@@ -23,6 +23,8 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_res_cursor.h"
@@ -217,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = &bo->tbo.mem;
-	struct drm_mm_node *nodes = mem->mm_node;
-	unsigned pages = mem->num_pages;
+	struct ttm_resource *res = bo->tbo.resource;
+	unsigned pages = res->num_pages;
+	struct drm_mm_node *mm;
 	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
 
-	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;
 
-	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
-		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+	for (usage = 0; pages; pages -= mm->size, mm++)
+		usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
 	return usage;
 }
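The loop above no longer walks mem->mm_node: with struct ttm_resource as the base of all managers, the drm_mm nodes live in a trailing array of the range manager's node and are recovered via container_of(). A sketch mirroring the layout used by these hunks (the first_mm_node() accessor is hypothetical, for illustration only):

#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>

/* Layout as in <drm/ttm/ttm_range_manager.h>: the resource is the base,
 * the drm_mm nodes follow as a flexible array. */
struct ttm_range_mgr_node {
	struct ttm_resource base;
	struct drm_mm_node mm_nodes[];
};

/* Hypothetical accessor, equivalent to the container_of() expression in
 * amdgpu_vram_mgr_bo_visible_size() above. */
static inline struct drm_mm_node *first_mm_node(struct ttm_resource *res)
{
	return &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
}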
@@ -365,15 +368,15 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
 	uint64_t vis_usage = 0, mem_bytes, max_bytes;
+	struct ttm_range_mgr_node *node;
 	struct drm_mm *mm = &mgr->mm;
 	enum drm_mm_insert_mode mode;
-	struct drm_mm_node *nodes;
 	unsigned i;
 	int r;
 
@@ -386,10 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
 	/* bail out quickly if there's likely not enough VRAM for this BO */
-	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	mem_bytes = tbo->base.size;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
-		atomic64_sub(mem_bytes, &mgr->usage);
-		return -ENOSPC;
+		r = -ENOSPC;
+		goto error_sub;
 	}
 
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -404,22 +407,23 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 #endif
 		pages_per_node = max_t(uint32_t, pages_per_node,
 				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+		num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
 	}
 
-	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
-			       GFP_KERNEL | __GFP_ZERO);
-	if (!nodes) {
-		atomic64_sub(mem_bytes, &mgr->usage);
-		return -ENOMEM;
+	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+			GFP_KERNEL | __GFP_ZERO);
+	if (!node) {
+		r = -ENOMEM;
+		goto error_sub;
 	}
 
+	ttm_resource_init(tbo, place, &node->base);
+
 	mode = DRM_MM_INSERT_BEST;
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
-	mem->start = 0;
-	pages_left = mem->num_pages;
+	pages_left = node->base.num_pages;
 
 	/* Limit maximum size to 2GB due to SG table limitations */
 	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		if (pages >= pages_per_node)
 			alignment = pages_per_node;
 
-		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
-						0, place->fpfn, lpfn, mode);
+		r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+						alignment, 0, place->fpfn,
+						lpfn, mode);
 		if (unlikely(r)) {
 			if (pages > pages_per_node) {
 				if (is_power_of_2(pages))
@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 					pages = rounddown_pow_of_two(pages);
 				continue;
 			}
-			goto error;
+			goto error_free;
 		}
 
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
-		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
 		pages_left -= pages;
 		++i;
 
@@ -456,19 +461,20 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (i == 1)
-		mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
 	atomic64_add(vis_usage, &mgr->vis_usage);
-	mem->mm_node = nodes;
+	*res = &node->base;
 	return 0;
 
-error:
+error_free:
 	while (i--)
-		drm_mm_remove_node(&nodes[i]);
+		drm_mm_remove_node(&node->mm_nodes[i]);
 	spin_unlock(&mgr->lock);
-	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+	kvfree(node);
 
-	kvfree(nodes);
+error_sub:
+	atomic64_sub(mem_bytes, &mgr->usage);
 	return r;
 }
 
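Note the allocation change above: a kvmalloc_array() of bare drm_mm_nodes becomes one kvmalloc() of the node header plus its flexible mm_nodes[] array, sized with overflow-checked struct_size(), after which the embedded resource is set up with ttm_resource_init(). A reduced sketch of that pattern; range_node_alloc() is a hypothetical stand-in, not a function from this series:

#include <linux/mm.h>		/* kvmalloc(), kvfree() */
#include <linux/overflow.h>	/* struct_size() */

/* Hypothetical stand-in for the allocation in amdgpu_vram_mgr_new():
 * one allocation covers the header and num_nodes trailing entries. */
static struct ttm_range_mgr_node *range_node_alloc(unsigned long num_nodes)
{
	struct ttm_range_mgr_node *node;

	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
			GFP_KERNEL | __GFP_ZERO);
	return node;	/* NULL on allocation failure */
}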
@@ -481,24 +487,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated VRAM again.
  */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-				struct ttm_resource *mem)
+				struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct drm_mm_node *nodes = mem->mm_node;
 	uint64_t usage = 0, vis_usage = 0;
-	unsigned pages = mem->num_pages;
-
-	if (!mem->mm_node)
-		return;
+	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
-	while (pages) {
-		pages -= nodes->size;
-		drm_mm_remove_node(nodes);
-		usage += nodes->size << PAGE_SHIFT;
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
-		++nodes;
+	for (i = 0, pages = res->num_pages; pages;
+	     pages -= node->mm_nodes[i].size, ++i) {
+		struct drm_mm_node *mm = &node->mm_nodes[i];
+
+		drm_mm_remove_node(mm);
+		usage += mm->size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
@@ -506,8 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
-	kvfree(mem->mm_node);
-	mem->mm_node = NULL;
+	kvfree(node);
 }
 
 /**
@@ -524,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
  * Allocate and fill a sg table from a VRAM allocation.
  */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-			      struct ttm_resource *mem,
+			      struct ttm_resource *res,
 			      u64 offset, u64 length,
 			      struct device *dev,
 			      enum dma_data_direction dir,
@@ -540,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	/* Determine the number of DRM_MM nodes to export */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
 		amdgpu_res_next(&cursor, cursor.size);
@@ -560,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	 * and the number of bytes from it. Access the following
 	 * DRM_MM node(s) if more buffer needs to exported
 	 */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
 		size_t size = cursor.size;
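Both the sg-table export above and the kfd migration code below now iterate VRAM through struct amdgpu_res_cursor instead of raw drm_mm_node pointers: amdgpu_res_first() positions the cursor at a byte offset inside the resource and amdgpu_res_next() advances it across node boundaries. A minimal walk, assuming a hypothetical handle_chunk() callback:

#include "amdgpu_res_cursor.h"

/* Illustration only: visit every contiguous chunk of a resource in
 * [offset, offset + length). handle_chunk() is a hypothetical callback. */
static void walk_vram(struct ttm_resource *res, u64 offset, u64 length,
		      void (*handle_chunk)(u64 start, u64 size))
{
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		handle_chunk(cursor.start, cursor.size);
		amdgpu_res_next(&cursor, cursor.size);
	}
}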
@@ -29,6 +29,7 @@
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
 #include "kfd_priv.h"
 #include "kfd_svm.h"
 #include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
 	return r;
 }
 
-static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
-			       struct drm_mm_node **mm_node, uint64_t *offset)
-{
-	struct drm_mm_node *node = *mm_node;
-	uint64_t pos = *offset;
-
-	if (node->start == AMDGPU_BO_INVALID_OFFSET) {
-		pr_debug("drm node is not validated\n");
-		return 0;
-	}
-
-	pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
-		 node->size);
-
-	if (pos >= node->size) {
-		do  {
-			pos -= node->size;
-			node++;
-		} while (pos >= node->size);
-
-		*mm_node = node;
-		*offset = pos;
-	}
-
-	return (node->start + pos) << PAGE_SHIFT;
-}
-
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
 {
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 {
 	uint64_t npages = migrate->cpages;
 	struct device *dev = adev->dev;
-	struct drm_mm_node *node;
+	struct amdgpu_res_cursor cursor;
 	dma_addr_t *src;
 	uint64_t *dst;
-	uint64_t vram_addr;
-	uint64_t offset;
 	uint64_t i, j;
 	int r;
 
@@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out;
 	}
 
-	node = prange->ttm_res->mm_node;
-	offset = prange->offset;
-	vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
-	if (!vram_addr) {
-		WARN_ONCE(1, "vram node address is 0\n");
-		r = -ENOMEM;
-		goto out;
-	}
-
+	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+			 npages << PAGE_SHIFT, &cursor);
 	for (i = j = 0; i < npages; i++) {
 		struct page *spage;
 
-		dst[i] = vram_addr + (j << PAGE_SHIFT);
+		dst[i] = cursor.start + (j << PAGE_SHIFT);
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 
@@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 						mfence);
 				if (r)
 					goto out_free_vram_pages;
-				offset += j;
-				vram_addr = (node->start + offset) << PAGE_SHIFT;
+				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
 				j = 0;
 			} else {
-				offset++;
-				vram_addr += PAGE_SIZE;
-			}
-			if (offset >= node->size) {
-				node++;
-				pr_debug("next node size 0x%llx\n", node->size);
-				vram_addr = node->start << PAGE_SHIFT;
-				offset = 0;
+				amdgpu_res_next(&cursor, PAGE_SIZE);
 			}
 			continue;
 		}
@@ -373,19 +329,15 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
 			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
 
-		if (j + offset >= node->size - 1 && i < npages - 1) {
+		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
 			r = svm_migrate_copy_memory_gart(adev, src + i - j,
 							 dst + i - j, j + 1,
 							 FROM_RAM_TO_VRAM,
 							 mfence);
 			if (r)
 				goto out_free_vram_pages;
-			node++;
-			pr_debug("next node size 0x%llx\n", node->size);
-			vram_addr = node->start << PAGE_SHIFT;
-			offset = 0;
-			j = 0;
+			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
+			j= 0;
 		} else {
 			j++;
 		}
@@ -409,7 +409,7 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
 				 prange->svms, prange->start, prange->last);
 
-			prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
 			return true;
 		}
 
@@ -515,7 +515,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 
 	svm_bo->bo = bo;
 	prange->svm_bo = svm_bo;
-	prange->ttm_res = &bo->tbo.mem;
+	prange->ttm_res = bo->tbo.resource;
 	prange->offset = 0;
 
 	spin_lock(&svm_bo->list_lock);
@@ -8447,9 +8447,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
-					      false,
-					      msecs_to_jiffies(5000));
+		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
 			DRM_ERROR("Waiting for fences timed out!");
 
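One behavioral detail the amdgpu_dm hunk above relies on: dma_resv_wait_timeout() returns the remaining timeout in jiffies on success, zero on timeout and a negative errno on error, so a single r <= 0 test covers both failure modes. A hedged sketch; wait_flip_fence() is hypothetical:

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Illustration only: collapse timeout and error into one errno. */
static int wait_flip_fence(struct dma_resv *resv)
{
	long r = dma_resv_wait_timeout(resv, true, false,
				       msecs_to_jiffies(5000));

	return r <= 0 ? -ETIMEDOUT : 0;
}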
@@ -285,6 +285,16 @@ config DRM_TI_TFP410
 	help
 	  Texas Instruments TFP410 DVI/HDMI Transmitter driver
 
+config DRM_TI_SN65DSI83
+	tristate "TI SN65DSI83 and SN65DSI84 DSI to LVDS bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	help
+	  Texas Instruments SN65DSI83 and SN65DSI84 DSI to LVDS Bridge driver
+
 config DRM_TI_SN65DSI86
 	tristate "TI SN65DSI86 DSI to eDP bridge"
 	depends on OF
@@ -23,6 +23,7 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
 obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
							
								
								
									
drivers/gpu/drm/bridge/ti-sn65dsi83.c (new file)
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI SN65DSI83,84,85 driver
+ *
+ * Currently supported:
+ * - SN65DSI83
+ *   = 1x Single-link DSI ~ 1x Single-link LVDS
+ *   - Supported
+ *   - Single-link LVDS mode tested
+ * - SN65DSI84
+ *   = 1x Single-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ *   - Supported
+ *   - Dual-link LVDS mode tested
+ *   - 2x Single-link LVDS mode unsupported
+ *     (should be easy to add by someone who has the HW)
+ * - SN65DSI85
+ *   = 2x Single-link or 1x Dual-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ *   - Unsupported
+ *     (should be easy to add by someone who has the HW)
+ *
+ * Copyright (C) 2021 Marek Vasut <marex@denx.de>
+ *
+ * Based on previous work of:
+ * Valentin Raevsky <valentin@compulab.co.il>
+ * Philippe Schenker <philippe.schenker@toradex.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* ID registers */
+#define REG_ID(n)				(0x00 + (n))
+/* Reset and clock registers */
+#define REG_RC_RESET				0x09
+#define  REG_RC_RESET_SOFT_RESET		BIT(0)
+#define REG_RC_LVDS_PLL				0x0a
+#define  REG_RC_LVDS_PLL_PLL_EN_STAT		BIT(7)
+#define  REG_RC_LVDS_PLL_LVDS_CLK_RANGE(n)	(((n) & 0x7) << 1)
+#define  REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY	BIT(0)
+#define REG_RC_DSI_CLK				0x0b
+#define  REG_RC_DSI_CLK_DSI_CLK_DIVIDER(n)	(((n) & 0x1f) << 3)
+#define  REG_RC_DSI_CLK_REFCLK_MULTIPLIER(n)	((n) & 0x3)
+#define REG_RC_PLL_EN				0x0d
+#define  REG_RC_PLL_EN_PLL_EN			BIT(0)
+/* DSI registers */
+#define REG_DSI_LANE				0x10
+#define  REG_DSI_LANE_LEFT_RIGHT_PIXELS		BIT(7)	/* DSI85-only */
+#define  REG_DSI_LANE_DSI_CHANNEL_MODE_DUAL	0	/* DSI85-only */
+#define  REG_DSI_LANE_DSI_CHANNEL_MODE_2SINGLE	BIT(6)	/* DSI85-only */
+#define  REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE	BIT(5)
+#define  REG_DSI_LANE_CHA_DSI_LANES(n)		(((n) & 0x3) << 3)
+#define  REG_DSI_LANE_CHB_DSI_LANES(n)		(((n) & 0x3) << 1)
+#define  REG_DSI_LANE_SOT_ERR_TOL_DIS		BIT(0)
+#define REG_DSI_EQ				0x11
+#define  REG_DSI_EQ_CHA_DSI_DATA_EQ(n)		(((n) & 0x3) << 6)
+#define  REG_DSI_EQ_CHA_DSI_CLK_EQ(n)		(((n) & 0x3) << 2)
+#define REG_DSI_CLK				0x12
+#define  REG_DSI_CLK_CHA_DSI_CLK_RANGE(n)	((n) & 0xff)
+/* LVDS registers */
+#define REG_LVDS_FMT				0x18
+#define  REG_LVDS_FMT_DE_NEG_POLARITY		BIT(7)
+#define  REG_LVDS_FMT_HS_NEG_POLARITY		BIT(6)
+#define  REG_LVDS_FMT_VS_NEG_POLARITY		BIT(5)
+#define  REG_LVDS_FMT_LVDS_LINK_CFG		BIT(4)	/* 0:AB 1:A-only */
+#define  REG_LVDS_FMT_CHA_24BPP_MODE		BIT(3)
+#define  REG_LVDS_FMT_CHB_24BPP_MODE		BIT(2)
+#define  REG_LVDS_FMT_CHA_24BPP_FORMAT1		BIT(1)
+#define  REG_LVDS_FMT_CHB_24BPP_FORMAT1		BIT(0)
+#define REG_LVDS_VCOM				0x19
+#define  REG_LVDS_VCOM_CHA_LVDS_VOCM		BIT(6)
+#define  REG_LVDS_VCOM_CHB_LVDS_VOCM		BIT(4)
+#define  REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(n)	(((n) & 0x3) << 2)
+#define  REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(n)	((n) & 0x3)
+#define REG_LVDS_LANE				0x1a
+#define  REG_LVDS_LANE_EVEN_ODD_SWAP		BIT(6)
+#define  REG_LVDS_LANE_CHA_REVERSE_LVDS		BIT(5)
+#define  REG_LVDS_LANE_CHB_REVERSE_LVDS		BIT(4)
+#define  REG_LVDS_LANE_CHA_LVDS_TERM		BIT(1)
+#define  REG_LVDS_LANE_CHB_LVDS_TERM		BIT(0)
+#define REG_LVDS_CM				0x1b
+#define  REG_LVDS_CM_CHA_LVDS_CM_ADJUST(n)	(((n) & 0x3) << 4)
+#define  REG_LVDS_CM_CHB_LVDS_CM_ADJUST(n)	((n) & 0x3)
+/* Video registers */
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW	0x20
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH	0x21
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW	0x24
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH	0x25
+#define REG_VID_CHA_SYNC_DELAY_LOW		0x28
+#define REG_VID_CHA_SYNC_DELAY_HIGH		0x29
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW	0x2c
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH	0x2d
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW	0x30
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH	0x31
+#define REG_VID_CHA_HORIZONTAL_BACK_PORCH	0x34
+#define REG_VID_CHA_VERTICAL_BACK_PORCH		0x36
+#define REG_VID_CHA_HORIZONTAL_FRONT_PORCH	0x38
+#define REG_VID_CHA_VERTICAL_FRONT_PORCH	0x3a
+#define REG_VID_CHA_TEST_PATTERN		0x3c
+/* IRQ registers */
+#define REG_IRQ_GLOBAL				0xe0
+#define  REG_IRQ_GLOBAL_IRQ_EN			BIT(0)
+#define REG_IRQ_EN				0xe1
+#define  REG_IRQ_EN_CHA_SYNCH_ERR_EN		BIT(7)
+#define  REG_IRQ_EN_CHA_CRC_ERR_EN		BIT(6)
+#define  REG_IRQ_EN_CHA_UNC_ECC_ERR_EN		BIT(5)
+#define  REG_IRQ_EN_CHA_COR_ECC_ERR_EN		BIT(4)
+#define  REG_IRQ_EN_CHA_LLP_ERR_EN		BIT(3)
+#define  REG_IRQ_EN_CHA_SOT_BIT_ERR_EN		BIT(2)
+#define  REG_IRQ_EN_CHA_PLL_UNLOCK_EN		BIT(0)
+#define REG_IRQ_STAT				0xe5
+#define  REG_IRQ_STAT_CHA_SYNCH_ERR		BIT(7)
+#define  REG_IRQ_STAT_CHA_CRC_ERR		BIT(6)
+#define  REG_IRQ_STAT_CHA_UNC_ECC_ERR		BIT(5)
+#define  REG_IRQ_STAT_CHA_COR_ECC_ERR		BIT(4)
+#define  REG_IRQ_STAT_CHA_LLP_ERR		BIT(3)
+#define  REG_IRQ_STAT_CHA_SOT_BIT_ERR		BIT(2)
+#define  REG_IRQ_STAT_CHA_PLL_UNLOCK		BIT(0)
+
+enum sn65dsi83_model {
+	MODEL_SN65DSI83,
+	MODEL_SN65DSI84,
+};
+
+struct sn65dsi83 {
+	struct drm_bridge		bridge;
+	struct drm_display_mode		mode;
+	struct device			*dev;
+	struct regmap			*regmap;
+	struct device_node		*host_node;
+	struct mipi_dsi_device		*dsi;
+	struct drm_bridge		*panel_bridge;
+	struct gpio_desc		*enable_gpio;
+	int				dsi_lanes;
+	bool				lvds_dual_link;
+	bool				lvds_dual_link_even_odd_swap;
+	bool				lvds_format_24bpp;
+	bool				lvds_format_jeida;
+};
+
+static const struct regmap_range sn65dsi83_readable_ranges[] = {
+	regmap_reg_range(REG_ID(0), REG_ID(8)),
+	regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_DSI_CLK),
+	regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+	regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+	regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+	regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+			 REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+			 REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+	regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+			 REG_VID_CHA_SYNC_DELAY_HIGH),
+	regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+			 REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+			 REG_VID_CHA_VERTICAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+			 REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+			 REG_VID_CHA_VERTICAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+	regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+	regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_readable_table = {
+	.yes_ranges = sn65dsi83_readable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(sn65dsi83_readable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_writeable_ranges[] = {
+	regmap_reg_range(REG_RC_RESET, REG_RC_DSI_CLK),
+	regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+	regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+	regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+	regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+			 REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+			 REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+	regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+			 REG_VID_CHA_SYNC_DELAY_HIGH),
+	regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+			 REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+			 REG_VID_CHA_VERTICAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+			 REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+			 REG_VID_CHA_VERTICAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+	regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+	regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_writeable_table = {
+	.yes_ranges = sn65dsi83_writeable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(sn65dsi83_writeable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_volatile_ranges[] = {
+	regmap_reg_range(REG_RC_RESET, REG_RC_RESET),
+	regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_LVDS_PLL),
+	regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_volatile_table = {
+	.yes_ranges = sn65dsi83_volatile_ranges,
+	.n_yes_ranges = ARRAY_SIZE(sn65dsi83_volatile_ranges),
+};
+
+static const struct regmap_config sn65dsi83_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.rd_table = &sn65dsi83_readable_table,
+	.wr_table = &sn65dsi83_writeable_table,
+	.volatile_table = &sn65dsi83_volatile_table,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = REG_IRQ_STAT,
+};
+
+static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct sn65dsi83, bridge);
+}
+
+static int sn65dsi83_attach(struct drm_bridge *bridge,
+			    enum drm_bridge_attach_flags flags)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+	struct device *dev = ctx->dev;
+	struct mipi_dsi_device *dsi;
+	struct mipi_dsi_host *host;
+	int ret = 0;
+
+	const struct mipi_dsi_device_info info = {
+		.type = "sn65dsi83",
+		.channel = 0,
+		.node = NULL,
+	};
+
+	host = of_find_mipi_dsi_host_by_node(ctx->host_node);
+	if (!host) {
+		dev_err(dev, "failed to find dsi host\n");
+		return -EPROBE_DEFER;
+	}
+
+	dsi = mipi_dsi_device_register_full(host, &info);
+	if (IS_ERR(dsi)) {
+		return dev_err_probe(dev, PTR_ERR(dsi),
+				     "failed to create dsi device\n");
+	}
+
+	ctx->dsi = dsi;
+
+	dsi->lanes = ctx->dsi_lanes;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		dev_err(dev, "failed to attach dsi to host\n");
+		goto err_dsi_attach;
+	}
+
+	return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+				 &ctx->bridge, flags);
+
+err_dsi_attach:
+	mipi_dsi_device_unregister(dsi);
+	return ret;
+}
+
+static void sn65dsi83_pre_enable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	/*
+	 * Reset the chip, pull EN line low for t_reset=10ms,
+	 * then high for t_en=1ms.
+	 */
+	regcache_mark_dirty(ctx->regmap);
+	gpiod_set_value(ctx->enable_gpio, 0);
+	usleep_range(10000, 11000);
+	gpiod_set_value(ctx->enable_gpio, 1);
+	usleep_range(1000, 1100);
+}
+
+static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx)
+{
+	/*
+	 * The encoding of the LVDS_CLK_RANGE is as follows:
+	 * 000 - 25 MHz <= LVDS_CLK < 37.5 MHz
+	 * 001 - 37.5 MHz <= LVDS_CLK < 62.5 MHz
+	 * 010 - 62.5 MHz <= LVDS_CLK < 87.5 MHz
+	 * 011 - 87.5 MHz <= LVDS_CLK < 112.5 MHz
+	 * 100 - 112.5 MHz <= LVDS_CLK < 137.5 MHz
+	 * 101 - 137.5 MHz <= LVDS_CLK <= 154 MHz
+	 * which is a range of 12.5MHz..162.5MHz in 50MHz steps, except that
+	 * the ends of the ranges are clamped to the supported range. Since
+	 * sn65dsi83_mode_valid() already filters the valid modes and limits
+	 * the clock to 25..154 MHz, the range calculation can be simplified
+	 * as follows:
+	 */
+	int mode_clock = ctx->mode.clock;
+
+	if (ctx->lvds_dual_link)
+		mode_clock /= 2;
+
+	return (mode_clock - 12500) / 25000;
+}
+
+static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx)
+{
+	/*
+	 * The encoding of the CHA_DSI_CLK_RANGE is as follows:
+	 * 0x00 through 0x07 - Reserved
+	 * 0x08 - 40 <= DSI_CLK < 45 MHz
+	 * 0x09 - 45 <= DSI_CLK < 50 MHz
+	 * ...
+	 * 0x63 - 495 <= DSI_CLK < 500 MHz
+	 * 0x64 - 500 MHz
+	 * 0x65 through 0xFF - Reserved
+	 * which is DSI clock in 5 MHz steps, clamped to 40..500 MHz.
+	 * The DSI clock are calculated as:
+	 *  DSI_CLK = mode clock * bpp / dsi_data_lanes / 2
+	 * the 2 is there because the bus is DDR.
+	 */
+	return DIV_ROUND_UP(clamp((unsigned int)ctx->mode.clock *
+			    mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
+			    ctx->dsi_lanes / 2, 40000U, 500000U), 5000U);
+}
+
+static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
+{
+	/* The divider is (DSI_CLK / LVDS_CLK) - 1, which really is: */
+	unsigned int dsi_div = mipi_dsi_pixel_format_to_bpp(ctx->dsi->format);
+
+	dsi_div /= ctx->dsi_lanes;
+
+	if (!ctx->lvds_dual_link)
+		dsi_div /= 2;
+
+	return dsi_div - 1;
+}
+
+static void sn65dsi83_enable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+	unsigned int pval;
+	u16 val;
+	int ret;
+
+	/* Clear reset, disable PLL */
+	regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+	regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+
+	/* Reference clock derived from DSI link clock. */
+	regmap_write(ctx->regmap, REG_RC_LVDS_PLL,
+		     REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx)) |
+		     REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY);
+	regmap_write(ctx->regmap, REG_DSI_CLK,
+		     REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx)));
+	regmap_write(ctx->regmap, REG_RC_DSI_CLK,
+		     REG_RC_DSI_CLK_DSI_CLK_DIVIDER(sn65dsi83_get_dsi_div(ctx)));
+
+	/* Set number of DSI lanes and LVDS link config. */
+	regmap_write(ctx->regmap, REG_DSI_LANE,
+		     REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE |
+		     REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi_lanes - 1)) |
+		     /* CHB is DSI85-only, set to default on DSI83/DSI84 */
+		     REG_DSI_LANE_CHB_DSI_LANES(3));
+	/* No equalization. */
+	regmap_write(ctx->regmap, REG_DSI_EQ, 0x00);
+
+	/* Set up sync signal polarity. */
+	val = (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC ?
+	       REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
+	      (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC ?
+	       REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+
+	/* Set up bits-per-pixel, 18bpp or 24bpp. */
+	if (ctx->lvds_format_24bpp) {
+		val |= REG_LVDS_FMT_CHA_24BPP_MODE;
+		if (ctx->lvds_dual_link)
+			val |= REG_LVDS_FMT_CHB_24BPP_MODE;
+	}
+
+	/* Set up LVDS format, JEIDA/Format 1 or SPWG/Format 2 */
+	if (ctx->lvds_format_jeida) {
+		val |= REG_LVDS_FMT_CHA_24BPP_FORMAT1;
+		if (ctx->lvds_dual_link)
+			val |= REG_LVDS_FMT_CHB_24BPP_FORMAT1;
+	}
+
+	/* Set up LVDS output config (DSI84,DSI85) */
+	if (!ctx->lvds_dual_link)
+		val |= REG_LVDS_FMT_LVDS_LINK_CFG;
+
+	regmap_write(ctx->regmap, REG_LVDS_FMT, val);
+	regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+	regmap_write(ctx->regmap, REG_LVDS_LANE,
+		     (ctx->lvds_dual_link_even_odd_swap ?
+		      REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
+		     REG_LVDS_LANE_CHA_LVDS_TERM |
+		     REG_LVDS_LANE_CHB_LVDS_TERM);
+	regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
+
+	val = cpu_to_le16(ctx->mode.hdisplay);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+			  &val, 2);
+	val = cpu_to_le16(ctx->mode.vdisplay);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+			  &val, 2);
+	/* 32 + 1 pixel clock to ensure proper operation */
+	val = cpu_to_le16(32 + 1);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &val, 2);
+	val = cpu_to_le16(ctx->mode.hsync_end - ctx->mode.hsync_start);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+			  &val, 2);
+	val = cpu_to_le16(ctx->mode.vsync_end - ctx->mode.vsync_start);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+			  &val, 2);
+	regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+		     ctx->mode.htotal - ctx->mode.hsync_end);
+	regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH,
+		     ctx->mode.vtotal - ctx->mode.vsync_end);
+	regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+		     ctx->mode.hsync_start - ctx->mode.hdisplay);
+	regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH,
+		     ctx->mode.vsync_start - ctx->mode.vdisplay);
+	regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
+
+	/* Enable PLL */
+	regmap_write(ctx->regmap, REG_RC_PLL_EN, REG_RC_PLL_EN_PLL_EN);
+	usleep_range(3000, 4000);
+	ret = regmap_read_poll_timeout(ctx->regmap, REG_RC_LVDS_PLL, pval,
+				       pval & REG_RC_LVDS_PLL_PLL_EN_STAT,
+				       1000, 100000);
+	if (ret) {
+		dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+		/* On failure, disable PLL again and exit. */
+		regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+		return;
+	}
+
+	/* Trigger reset after CSR register update. */
+	regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
+
+	/* Clear all errors that got asserted during initialization. */
+	regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+	regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
+}
+
+static void sn65dsi83_disable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	/* Clear reset, disable PLL */
+	regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+	regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+}
+
+static void sn65dsi83_post_disable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	/* Put the chip in reset, pull EN line low. */
+	gpiod_set_value(ctx->enable_gpio, 0);
+}
+
+static enum drm_mode_status
+sn65dsi83_mode_valid(struct drm_bridge *bridge,
+		     const struct drm_display_info *info,
+		     const struct drm_display_mode *mode)
+{
+	/* LVDS output clock range 25..154 MHz */
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+	if (mode->clock > 154000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void sn65dsi83_mode_set(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       const struct drm_display_mode *adj)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	ctx->mode = *adj;
+}
+
+static bool sn65dsi83_mode_fixup(struct drm_bridge *bridge,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adj)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
 | 
				
			||||||
 | 
						u32 input_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 | 
				
			||||||
 | 
						struct drm_encoder *encoder = bridge->encoder;
 | 
				
			||||||
 | 
						struct drm_device *ddev = encoder->dev;
 | 
				
			||||||
 | 
						struct drm_connector *connector;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* The DSI format is always RGB888_1X24 */
 | 
				
			||||||
 | 
						list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
 | 
				
			||||||
 | 
							switch (connector->display_info.bus_formats[0]) {
 | 
				
			||||||
 | 
							case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
 | 
				
			||||||
 | 
								ctx->lvds_format_24bpp = false;
 | 
				
			||||||
 | 
								ctx->lvds_format_jeida = true;
 | 
				
			||||||
 | 
								break;
 | 
				
			||||||
 | 
							case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
 | 
				
			||||||
 | 
								ctx->lvds_format_24bpp = true;
 | 
				
			||||||
 | 
								ctx->lvds_format_jeida = true;
 | 
				
			||||||
 | 
								break;
 | 
				
			||||||
 | 
							case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
 | 
				
			||||||
 | 
								ctx->lvds_format_24bpp = true;
 | 
				
			||||||
 | 
								ctx->lvds_format_jeida = false;
 | 
				
			||||||
 | 
								break;
 | 
				
			||||||
 | 
							default:
 | 
				
			||||||
 | 
								/*
 | 
				
			||||||
 | 
								 * Some bridges still don't set the correct
 | 
				
			||||||
 | 
								 * LVDS bus pixel format, use SPWG24 default
 | 
				
			||||||
 | 
								 * format until those are fixed.
 | 
				
			||||||
 | 
								 */
 | 
				
			||||||
 | 
								ctx->lvds_format_24bpp = true;
 | 
				
			||||||
 | 
								ctx->lvds_format_jeida = false;
 | 
				
			||||||
 | 
								dev_warn(ctx->dev,
 | 
				
			||||||
 | 
									 "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
 | 
				
			||||||
 | 
									 connector->display_info.bus_formats[0]);
 | 
				
			||||||
 | 
								break;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							drm_display_info_set_bus_formats(&connector->display_info,
 | 
				
			||||||
 | 
											 &input_bus_format, 1);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return true;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static const struct drm_bridge_funcs sn65dsi83_funcs = {
 | 
				
			||||||
 | 
						.attach		= sn65dsi83_attach,
 | 
				
			||||||
 | 
						.pre_enable	= sn65dsi83_pre_enable,
 | 
				
			||||||
 | 
						.enable		= sn65dsi83_enable,
 | 
				
			||||||
 | 
						.disable	= sn65dsi83_disable,
 | 
				
			||||||
 | 
						.post_disable	= sn65dsi83_post_disable,
 | 
				
			||||||
 | 
						.mode_valid	= sn65dsi83_mode_valid,
 | 
				
			||||||
 | 
						.mode_set	= sn65dsi83_mode_set,
 | 
				
			||||||
 | 
						.mode_fixup	= sn65dsi83_mode_fixup,
 | 
				
			||||||
 | 
					};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct drm_bridge *panel_bridge;
 | 
				
			||||||
 | 
						struct device *dev = ctx->dev;
 | 
				
			||||||
 | 
						struct device_node *endpoint;
 | 
				
			||||||
 | 
						struct drm_panel *panel;
 | 
				
			||||||
 | 
						int ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
 | 
				
			||||||
 | 
						ctx->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
 | 
				
			||||||
 | 
						ctx->host_node = of_graph_get_remote_port_parent(endpoint);
 | 
				
			||||||
 | 
						of_node_put(endpoint);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
 | 
				
			||||||
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
						if (!ctx->host_node)
 | 
				
			||||||
 | 
							return -ENODEV;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx->lvds_dual_link = false;
 | 
				
			||||||
 | 
						ctx->lvds_dual_link_even_odd_swap = false;
 | 
				
			||||||
 | 
						if (model != MODEL_SN65DSI83) {
 | 
				
			||||||
 | 
							struct device_node *port2, *port3;
 | 
				
			||||||
 | 
							int dual_link;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							port2 = of_graph_get_port_by_id(dev->of_node, 2);
 | 
				
			||||||
 | 
							port3 = of_graph_get_port_by_id(dev->of_node, 3);
 | 
				
			||||||
 | 
							dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
 | 
				
			||||||
 | 
							of_node_put(port2);
 | 
				
			||||||
 | 
							of_node_put(port3);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
 | 
				
			||||||
 | 
								ctx->lvds_dual_link = true;
 | 
				
			||||||
 | 
								/* Odd pixels to LVDS Channel A, even pixels to B */
 | 
				
			||||||
 | 
								ctx->lvds_dual_link_even_odd_swap = false;
 | 
				
			||||||
 | 
							} else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
 | 
				
			||||||
 | 
								ctx->lvds_dual_link = true;
 | 
				
			||||||
 | 
								/* Even pixels to LVDS Channel A, odd pixels to B */
 | 
				
			||||||
 | 
								ctx->lvds_dual_link_even_odd_swap = true;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
 | 
				
			||||||
 | 
						if (ret < 0)
 | 
				
			||||||
 | 
							return ret;
 | 
				
			||||||
 | 
						if (panel) {
 | 
				
			||||||
 | 
							panel_bridge = devm_drm_panel_bridge_add(dev, panel);
 | 
				
			||||||
 | 
							if (IS_ERR(panel_bridge))
 | 
				
			||||||
 | 
								return PTR_ERR(panel_bridge);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx->panel_bridge = panel_bridge;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return 0;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static int sn65dsi83_probe(struct i2c_client *client,
 | 
				
			||||||
 | 
								   const struct i2c_device_id *id)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct device *dev = &client->dev;
 | 
				
			||||||
 | 
						enum sn65dsi83_model model;
 | 
				
			||||||
 | 
						struct sn65dsi83 *ctx;
 | 
				
			||||||
 | 
						int ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 | 
				
			||||||
 | 
						if (!ctx)
 | 
				
			||||||
 | 
							return -ENOMEM;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx->dev = dev;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (dev->of_node) {
 | 
				
			||||||
 | 
							model = (enum sn65dsi83_model)(uintptr_t)
 | 
				
			||||||
 | 
								of_device_get_match_data(dev);
 | 
				
			||||||
 | 
						} else {
 | 
				
			||||||
 | 
							model = id->driver_data;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
 | 
				
			||||||
 | 
						if (IS_ERR(ctx->enable_gpio))
 | 
				
			||||||
 | 
							return PTR_ERR(ctx->enable_gpio);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ret = sn65dsi83_parse_dt(ctx, model);
 | 
				
			||||||
 | 
						if (ret)
 | 
				
			||||||
 | 
							return ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
 | 
				
			||||||
 | 
						if (IS_ERR(ctx->regmap))
 | 
				
			||||||
 | 
							return PTR_ERR(ctx->regmap);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						dev_set_drvdata(dev, ctx);
 | 
				
			||||||
 | 
						i2c_set_clientdata(client, ctx);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx->bridge.funcs = &sn65dsi83_funcs;
 | 
				
			||||||
 | 
						ctx->bridge.of_node = dev->of_node;
 | 
				
			||||||
 | 
						drm_bridge_add(&ctx->bridge);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return 0;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static int sn65dsi83_remove(struct i2c_client *client)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct sn65dsi83 *ctx = i2c_get_clientdata(client);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						mipi_dsi_detach(ctx->dsi);
 | 
				
			||||||
 | 
						mipi_dsi_device_unregister(ctx->dsi);
 | 
				
			||||||
 | 
						drm_bridge_remove(&ctx->bridge);
 | 
				
			||||||
 | 
						of_node_put(ctx->host_node);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return 0;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static struct i2c_device_id sn65dsi83_id[] = {
 | 
				
			||||||
 | 
						{ "ti,sn65dsi83", MODEL_SN65DSI83 },
 | 
				
			||||||
 | 
						{ "ti,sn65dsi84", MODEL_SN65DSI84 },
 | 
				
			||||||
 | 
						{},
 | 
				
			||||||
 | 
					};
 | 
				
			||||||
 | 
					MODULE_DEVICE_TABLE(i2c, sn65dsi83_id);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static const struct of_device_id sn65dsi83_match_table[] = {
 | 
				
			||||||
 | 
						{ .compatible = "ti,sn65dsi83", .data = (void *)MODEL_SN65DSI83 },
 | 
				
			||||||
 | 
						{ .compatible = "ti,sn65dsi84", .data = (void *)MODEL_SN65DSI84 },
 | 
				
			||||||
 | 
						{},
 | 
				
			||||||
 | 
					};
 | 
				
			||||||
 | 
					MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static struct i2c_driver sn65dsi83_driver = {
 | 
				
			||||||
 | 
						.probe = sn65dsi83_probe,
 | 
				
			||||||
 | 
						.remove = sn65dsi83_remove,
 | 
				
			||||||
 | 
						.id_table = sn65dsi83_id,
 | 
				
			||||||
 | 
						.driver = {
 | 
				
			||||||
 | 
							.name = "sn65dsi83",
 | 
				
			||||||
 | 
							.of_match_table = sn65dsi83_match_table,
 | 
				
			||||||
 | 
						},
 | 
				
			||||||
 | 
					};
 | 
				
			||||||
 | 
					module_i2c_driver(sn65dsi83_driver);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
 | 
				
			||||||
 | 
					MODULE_DESCRIPTION("TI SN65DSI83 DSI to LVDS bridge driver");
 | 
				
			||||||
 | 
					MODULE_LICENSE("GPL v2");
 | 
				
			||||||
| 
						 | 
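For orientation, a minimal standalone sketch of the LOW/HIGH register pattern behind the regmap_bulk_write() calls above: each 16-bit geometry value is stored little-endian and written as two bytes into consecutive 8-bit registers. The register address and function name here are illustrative placeholders, not part of the driver.

	#include <linux/regmap.h>

	#define EXAMPLE_REG_LOW	0x20	/* hypothetical LOW register address */

	/* Write a u16 as LOW byte then HIGH byte in a single bulk transfer. */
	static int example_write_u16(struct regmap *map, u16 value)
	{
		__le16 le_value = cpu_to_le16(value);

		return regmap_bulk_write(map, EXAMPLE_REG_LOW, &le_value, 2);
	}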
@ -28,6 +28,7 @@
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
 
+#include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
 #include <linux/mem_encrypt.h>
@ -35,6 +36,9 @@
 
 #include <drm/drm_cache.h>
 
+/* A small bounce buffer that fits on the stack. */
+#define MEMCPY_BOUNCE_SIZE 128
+
 #if defined(CONFIG_X86)
 #include <asm/smp.h>
 
@ -209,3 +213,147 @@ bool drm_need_swiotlb(int dma_bits)
 	return max_iomem > ((u64)1 << dma_bits);
 }
 EXPORT_SYMBOL(drm_need_swiotlb);
+
+static void memcpy_fallback(struct dma_buf_map *dst,
+			    const struct dma_buf_map *src,
+			    unsigned long len)
+{
+	if (!dst->is_iomem && !src->is_iomem) {
+		memcpy(dst->vaddr, src->vaddr, len);
+	} else if (!src->is_iomem) {
+		dma_buf_map_memcpy_to(dst, src->vaddr, len);
+	} else if (!dst->is_iomem) {
+		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
+	} else {
+		/*
+		 * Bounce size is not performance tuned, but using a
+		 * bounce buffer like this is significantly faster than
+		 * resorting to ioreadxx() + iowritexx().
+		 */
+		char bounce[MEMCPY_BOUNCE_SIZE];
+		void __iomem *_src = src->vaddr_iomem;
+		void __iomem *_dst = dst->vaddr_iomem;
+
+		while (len >= MEMCPY_BOUNCE_SIZE) {
+			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+			_src += MEMCPY_BOUNCE_SIZE;
+			_dst += MEMCPY_BOUNCE_SIZE;
+			len -= MEMCPY_BOUNCE_SIZE;
+		}
+		if (len) {
+			/* Copy only the remaining tail bytes. */
+			memcpy_fromio(bounce, _src, len);
+			memcpy_toio(_dst, bounce, len);
+		}
+	}
+}
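For reference, a short sketch (not part of the patch) of how callers build the two kinds of dma_buf_map descriptors that memcpy_fallback() above distinguishes; the function name is made up.

	#include <linux/dma-buf-map.h>

	/* dma_buf_map tags a mapping as system or I/O memory. */
	static void example_make_maps(void *sysmem, void __iomem *iomem,
				      struct dma_buf_map *a, struct dma_buf_map *b)
	{
		dma_buf_map_set_vaddr(a, sysmem);	/* is_iomem = false */
		dma_buf_map_set_vaddr_iomem(b, iomem);	/* is_iomem = true */
	}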
+#ifdef CONFIG_X86
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+	kernel_fpu_begin();
+
+	while (len >= 4) {
+		asm("movntdqa	(%0), %%xmm0\n"
+		    "movntdqa 16(%0), %%xmm1\n"
+		    "movntdqa 32(%0), %%xmm2\n"
+		    "movntdqa 48(%0), %%xmm3\n"
+		    "movaps %%xmm0,   (%1)\n"
+		    "movaps %%xmm1, 16(%1)\n"
+		    "movaps %%xmm2, 32(%1)\n"
+		    "movaps %%xmm3, 48(%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 64;
+		dst += 64;
+		len -= 4;
+	}
+	while (len--) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movaps %%xmm0, (%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 16;
+		dst += 16;
+	}
+
+	kernel_fpu_end();
+}
+
+/*
+ * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ */
+static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+		memcpy(dst, src, len);
+	else if (likely(len))
+		__memcpy_ntdqa(dst, src, len >> 4);
+}
+
+/**
+ * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
+ * that may be WC.
+ * @dst: The destination pointer
+ * @src: The source pointer
+ * @len: The size of the area to transfer in bytes
+ *
+ * Tries an arch optimized memcpy that prefetches when reading out of a WC
+ * region, and if no such beast is available, falls back to a normal memcpy.
+ */
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len)
+{
+	if (WARN_ON(in_interrupt())) {
+		memcpy_fallback(dst, src, len);
+		return;
+	}
+
+	if (static_branch_likely(&has_movntdqa)) {
+		__drm_memcpy_from_wc(dst->is_iomem ?
+				     (void __force *)dst->vaddr_iomem :
+				     dst->vaddr,
+				     src->is_iomem ?
+				     (void const __force *)src->vaddr_iomem :
+				     src->vaddr,
+				     len);
+		return;
+	}
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+/*
+ * drm_memcpy_init_early - One time initialization of the WC memcpy code
+ */
+void drm_memcpy_init_early(void)
+{
+	/*
+	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
+	 * emulation. So don't enable movntdqa in hypervisor guest.
+	 */
+	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&has_movntdqa);
+}
+#else
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len)
+{
+	WARN_ON(in_interrupt());
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+void drm_memcpy_init_early(void)
+{
+}
+#endif /* CONFIG_X86 */
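A hypothetical caller of the new helper, for orientation; the buffer names are made up. The helper takes the movntdqa path on capable x86 CPUs and memcpy_fallback() everywhere else.

	#include <linux/dma-buf-map.h>
	#include <drm/drm_cache.h>

	/* Copy a framebuffer out of a WC BAR mapping into a system-memory shadow. */
	static void example_copy_from_wc(void __iomem *wc_vaddr, void *shadow,
					 unsigned long size)
	{
		struct dma_buf_map src, dst;

		dma_buf_map_set_vaddr_iomem(&src, wc_vaddr);
		dma_buf_map_set_vaddr(&dst, shadow);
		drm_memcpy_from_wc(&dst, &src, size);
	}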
@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/srcu.h>
 
+#include <drm/drm_cache.h>
 #include <drm/drm_client.h>
 #include <drm/drm_color_mgmt.h>
 #include <drm/drm_drv.h>
@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
 
 	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
+	drm_memcpy_init_early();
 
 	ret = drm_sysfs_init();
 	if (ret < 0) {
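The one-time call added above flips a static branch; a generic sketch of that pattern follows (names and the feature flag are hypothetical, not the DRM code).

	#include <linux/jump_label.h>
	#include <linux/string.h>

	static DEFINE_STATIC_KEY_FALSE(my_fast_path);

	/* Called once at init, mirroring drm_memcpy_init_early(). */
	static void my_init_early(bool cpu_has_feature)
	{
		if (cpu_has_feature)
			static_branch_enable(&my_fast_path);
	}

	static void my_copy(void *dst, const void *src, unsigned long len)
	{
		if (static_branch_likely(&my_fast_path)) {
			/* the optimized variant would go here */
			memcpy(dst, src, len);
			return;
		}
		memcpy(dst, src, len);
	}

The branch body is patched in at boot, so the runtime check costs nothing on the common path.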
@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
-					true, timeout);
+	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
 	else if (ret > 0)
@ -1375,12 +1374,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 
 	if (!write) {
 		struct dma_fence *fence =
-			dma_resv_get_excl_rcu(obj->resv);
+			dma_resv_get_excl_unlocked(obj->resv);
 
 		return drm_gem_fence_array_add(fence_array, fence);
 	}
 
-	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
-				      &fence_count, &fences);
+	ret = dma_resv_get_fences(obj->resv, NULL,
+				  &fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
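The dma_resv changes in this and the following hunks are mechanical renames with unchanged semantics; collected in one place:

	/*
	 * Old name                           New name
	 * dma_resv_get_excl_rcu()            dma_resv_get_excl_unlocked()
	 * dma_resv_get_fences_rcu()          dma_resv_get_fences()
	 * dma_resv_wait_timeout_rcu()        dma_resv_wait_timeout()
	 * dma_resv_test_signaled_rcu()       dma_resv_test_signaled()
	 * rcu_dereference(resv->fence)       dma_resv_shared_list()
	 * rcu_dereference(resv->fence_excl)  dma_resv_excl_fence()
	 */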
@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = dma_resv_get_excl_rcu(obj->resv);
+	fence = dma_resv_get_excl_unlocked(obj->resv);
 	drm_atomic_set_fence_for_plane(state, fence);
 
 	return 0;
@ -104,8 +104,7 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
  * @size: size of the object to allocate
  *
  * This function creates a CMA GEM object and allocates a contiguous chunk of
- * memory as backing store. The backing memory has the writecombine attribute
- * set.
+ * memory as backing store.
  *
  * Returns:
  * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
@ -40,12 +40,12 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
 	const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
 	drm_printf_indent(p, indent, "placement=");
-	drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+	drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
 	drm_printf(p, "\n");
 
-	if (bo->mem.bus.is_iomem)
+	if (bo->resource->bus.is_iomem)
 		drm_printf_indent(p, indent, "bus.offset=%lx\n",
-				  (unsigned long)bo->mem.bus.offset);
+				  (unsigned long)bo->resource->bus.offset);
 }
 EXPORT_SYMBOL(drm_gem_ttm_print_info);
@ -17,6 +17,8 @@
 #include <drm/drm_prime.h>
 #include <drm/drm_simple_kms_helper.h>
 
+#include <drm/ttm/ttm_range_manager.h>
+
 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 
 /**
@ -248,10 +250,11 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource ||
+			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
 		return 0;
 
-	return gbo->bo.mem.start;
+	return gbo->bo.resource->start;
 }
 
 /**
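With struct ttm_resource now the base of all resource managers, drivers query placement through bo->resource instead of the embedded bo->mem, as the two helpers above now do. A small hedged sketch of the post-series accessor pattern (the function is illustrative, not from the patch):

	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_placement.h>

	/* NULL-check mirrors drm_gem_vram_pg_offset() above. */
	static bool example_bo_is_vram(struct ttm_buffer_object *bo)
	{
		return bo->resource && bo->resource->mem_type == TTM_PL_VRAM;
	}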
@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled_rcu(obj->resv,
-						write))
+		if (!dma_resv_test_signaled(obj->resv, write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = dma_resv_wait_timeout_rcu(obj->resv,
-						write, true, remain);
+		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
@ -461,7 +459,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			off, etnaviv_obj->vaddr, obj->size);
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared_list(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
@ -471,7 +469,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		}
 	}
 
-	fence = rcu_dereference(robj->fence_excl);
+	fence = dma_resv_excl_fence(robj);
 	if (fence)
 		etnaviv_gem_describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
-						      &bo->nr_shared,
-						      &bo->shared);
+			ret = dma_resv_get_fences(robj, &bo->excl,
+						  &bo->nr_shared,
+						  &bo->shared);
 			if (ret)
 				return ret;
 		} else {
-			bo->excl = dma_resv_get_excl_rcu(robj);
+			bo->excl = dma_resv_get_excl_unlocked(robj);
 		}
 
 	}
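For orientation, a hypothetical standalone use of the renamed dma_resv_get_fences(), matching the signature used in submit_fence_sync() above and including the cleanup the caller owes:

	#include <linux/dma-resv.h>
	#include <linux/slab.h>

	/* Collect all fences on a reservation object; caller owns the refs. */
	static int example_collect_fences(struct dma_resv *resv)
	{
		struct dma_fence *excl, **shared;
		unsigned int count, i;
		int ret;

		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
		if (ret)
			return ret;

		/* ... wait on or inspect the fences here ... */

		for (i = 0; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
		dma_fence_put(excl);	/* dma_fence_put() tolerates NULL */

		return 0;
	}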
@ -344,7 +344,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
 }
 
 /**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
  *
  * @ctx: display and enhancement controller context
  * @win: window to protect registers for
@ -88,7 +88,7 @@ void exynos_drm_ipp_unregister(struct device *dev,
 }
 
 /**
- * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
+ * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
@ -136,7 +136,7 @@ static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
 }
 
 /**
- * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
+ * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
							
								
								
									
drivers/gpu/drm/hyperv/Makefile (new file, 8 lines)
@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only

hyperv_drm-y := \
	hyperv_drm_drv.o \
	hyperv_drm_modeset.o \
	hyperv_drm_proto.o

obj-$(CONFIG_DRM_HYPERV) += hyperv_drm.o
							
								
								
									
drivers/gpu/drm/hyperv/hyperv_drm.h (new file, 52 lines)
@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2021 Microsoft
 */

#ifndef _HYPERV_DRM_H_
#define _HYPERV_DRM_H_

#define VMBUS_MAX_PACKET_SIZE 0x4000

struct hyperv_drm_device {
	/* drm */
	struct drm_device dev;
	struct drm_simple_display_pipe pipe;
	struct drm_connector connector;

	/* mode */
	u32 screen_width_max;
	u32 screen_height_max;
	u32 preferred_width;
	u32 preferred_height;
	u32 screen_depth;

	/* hw */
	struct resource *mem;
	void __iomem *vram;
	unsigned long fb_base;
	unsigned long fb_size;
	struct completion wait;
	u32 synthvid_version;
	u32 mmio_megabytes;
	bool dirt_needed;

	u8 init_buf[VMBUS_MAX_PACKET_SIZE];
	u8 recv_buf[VMBUS_MAX_PACKET_SIZE];

	struct hv_device *hdev;
};

#define to_hv(_dev) container_of(_dev, struct hyperv_drm_device, dev)

/* hyperv_drm_modeset */
int hyperv_mode_config_init(struct hyperv_drm_device *hv);

/* hyperv_drm_proto */
int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
			    u32 w, u32 h, u32 pitch);
int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
int hyperv_connect_vsp(struct hv_device *hdev);

#endif
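The to_hv() macro above is the usual container_of() idiom for drivers that embed their drm_device. A tiny usage sketch (illustrative only; assumes the header above plus drm/drm_print.h):

	static void example_log_limits(struct drm_device *dev)
	{
		struct hyperv_drm_device *hv = to_hv(dev);

		drm_info(dev, "max mode: %ux%u\n",
			 hv->screen_width_max, hv->screen_height_max);
	}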
							
								
								
									
drivers/gpu/drm/hyperv/hyperv_drm_drv.c (new file, 311 lines)
@ -0,0 +1,311 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Microsoft
 */

#include <linux/efi.h>
#include <linux/hyperv.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "hyperv_drm.h"

#define DRIVER_NAME "hyperv_drm"
#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
#define DRIVER_DATE "2020"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0

#define PCI_VENDOR_ID_MICROSOFT 0x1414
#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353

DEFINE_DRM_GEM_FOPS(hv_fops);

static struct drm_driver hyperv_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,

	.name		 = DRIVER_NAME,
	.desc		 = DRIVER_DESC,
	.date		 = DRIVER_DATE,
	.major		 = DRIVER_MAJOR,
	.minor		 = DRIVER_MINOR,

	.fops		 = &hv_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
};

static int hyperv_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	return 0;
}

static void hyperv_pci_remove(struct pci_dev *pdev)
{
}

static const struct pci_device_id hyperv_pci_tbl[] = {
	{
		.vendor = PCI_VENDOR_ID_MICROSOFT,
		.device = PCI_DEVICE_ID_HYPERV_VIDEO,
	},
	{ /* end of list */ }
};

/*
 * PCI stub to support gen1 VM.
 */
static struct pci_driver hyperv_pci_driver = {
	.name =		KBUILD_MODNAME,
	.id_table =	hyperv_pci_tbl,
	.probe =	hyperv_pci_probe,
	.remove =	hyperv_pci_remove,
};

static int hyperv_setup_gen1(struct hyperv_drm_device *hv)
{
	struct drm_device *dev = &hv->dev;
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
			      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
	if (!pdev) {
		drm_err(dev, "Unable to find PCI Hyper-V video\n");
		return -ENODEV;
	}

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hypervdrmfb");
	if (ret) {
		drm_err(dev, "Not able to remove boot fb\n");
		return ret;
	}

	if (pci_request_region(pdev, 0, DRIVER_NAME) != 0)
		drm_warn(dev, "Cannot request framebuffer, boot fb still active?\n");

	if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) {
		drm_err(dev, "Resource at bar 0 is not IORESOURCE_MEM\n");
		ret = -ENODEV;
		goto error;
	}

	hv->fb_base = pci_resource_start(pdev, 0);
	hv->fb_size = pci_resource_len(pdev, 0);
	if (!hv->fb_base) {
		drm_err(dev, "Resource not available\n");
		ret = -ENODEV;
		goto error;
	}

	hv->fb_size = min(hv->fb_size,
			  (unsigned long)(hv->mmio_megabytes * 1024 * 1024));
	hv->vram = devm_ioremap(&pdev->dev, hv->fb_base, hv->fb_size);
	if (!hv->vram) {
		drm_err(dev, "Failed to map vram\n");
		ret = -ENOMEM;
	}

error:
	pci_dev_put(pdev);
	return ret;
}

static int hyperv_setup_gen2(struct hyperv_drm_device *hv,
			     struct hv_device *hdev)
{
	struct drm_device *dev = &hv->dev;
	int ret;

	drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
						     screen_info.lfb_size,
						     false,
						     "hypervdrmfb");

	hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;

	ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
				  true);
	if (ret) {
		drm_err(dev, "Failed to allocate mmio\n");
		return -ENOMEM;
	}

	/*
	 * Map the VRAM cacheable for performance. This is also required for VM
	 * connect to display properly for ARM64 Linux VM, as the host also maps
	 * the VRAM cacheable.
	 */
	hv->vram = ioremap_cache(hv->mem->start, hv->fb_size);
	if (!hv->vram) {
		drm_err(dev, "Failed to map vram\n");
		ret = -ENOMEM;
		goto error;
	}

	hv->fb_base = hv->mem->start;
	return 0;

error:
	vmbus_free_mmio(hv->mem->start, hv->fb_size);
	return ret;
}

static int hyperv_vmbus_probe(struct hv_device *hdev,
			      const struct hv_vmbus_device_id *dev_id)
{
	struct hyperv_drm_device *hv;
	struct drm_device *dev;
	int ret;

	hv = devm_drm_dev_alloc(&hdev->device, &hyperv_driver,
				struct hyperv_drm_device, dev);
	if (IS_ERR(hv))
		return PTR_ERR(hv);

	dev = &hv->dev;
	init_completion(&hv->wait);
	hv_set_drvdata(hdev, hv);
	hv->hdev = hdev;

	ret = hyperv_connect_vsp(hdev);
	if (ret) {
		drm_err(dev, "Failed to connect to vmbus.\n");
		goto err_hv_set_drv_data;
	}

	if (efi_enabled(EFI_BOOT))
		ret = hyperv_setup_gen2(hv, hdev);
	else
		ret = hyperv_setup_gen1(hv);

	if (ret)
		goto err_vmbus_close;

	/*
	 * Should be done only once during init and resume. Failing to update
	 * vram location is not fatal. Device will update dirty area till
	 * preferred resolution only.
	 */
	ret = hyperv_update_vram_location(hdev, hv->fb_base);
	if (ret)
		drm_warn(dev, "Failed to update vram location.\n");

	hv->dirt_needed = true;

	ret = hyperv_mode_config_init(hv);
	if (ret)
		goto err_vmbus_close;

	ret = drm_dev_register(dev, 0);
	if (ret) {
		drm_err(dev, "Failed to register drm driver.\n");
		goto err_vmbus_close;
	}

	drm_fbdev_generic_setup(dev, 0);

	return 0;

err_vmbus_close:
	vmbus_close(hdev->channel);
err_hv_set_drv_data:
	hv_set_drvdata(hdev, NULL);
	return ret;
}

static int hyperv_vmbus_remove(struct hv_device *hdev)
{
	struct drm_device *dev = hv_get_drvdata(hdev);
	struct hyperv_drm_device *hv = to_hv(dev);

	drm_dev_unplug(dev);
	drm_atomic_helper_shutdown(dev);
	vmbus_close(hdev->channel);
	hv_set_drvdata(hdev, NULL);
	vmbus_free_mmio(hv->mem->start, hv->fb_size);

	return 0;
}

static int hyperv_vmbus_suspend(struct hv_device *hdev)
{
	struct drm_device *dev = hv_get_drvdata(hdev);
	int ret;

	ret = drm_mode_config_helper_suspend(dev);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

static int hyperv_vmbus_resume(struct hv_device *hdev)
{
	struct drm_device *dev = hv_get_drvdata(hdev);
	struct hyperv_drm_device *hv = to_hv(dev);
	int ret;

	ret = hyperv_connect_vsp(hdev);
	if (ret)
		return ret;

	ret = hyperv_update_vram_location(hdev, hv->fb_base);
	if (ret)
		return ret;

	return drm_mode_config_helper_resume(dev);
}

static const struct hv_vmbus_device_id hyperv_vmbus_tbl[] = {
	/* Synthetic Video Device GUID */
	{HV_SYNTHVID_GUID},
	{}
};

static struct hv_driver hyperv_hv_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hyperv_vmbus_tbl,
	.probe = hyperv_vmbus_probe,
	.remove = hyperv_vmbus_remove,
	.suspend = hyperv_vmbus_suspend,
	.resume = hyperv_vmbus_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int __init hyperv_init(void)
{
	int ret;

	ret = pci_register_driver(&hyperv_pci_driver);
	if (ret != 0)
		return ret;

	return vmbus_driver_register(&hyperv_hv_driver);
}

static void __exit hyperv_exit(void)
{
	vmbus_driver_unregister(&hyperv_hv_driver);
	pci_unregister_driver(&hyperv_pci_driver);
}

module_init(hyperv_init);
module_exit(hyperv_exit);

MODULE_DEVICE_TABLE(pci, hyperv_pci_tbl);
MODULE_DEVICE_TABLE(vmbus, hyperv_vmbus_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Deepak Rawat <drawat.floss@gmail.com>");
MODULE_DESCRIPTION("DRM driver for Hyper-V synthetic video device");
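hyperv_vmbus_probe() above uses the managed allocator; a generic sketch of that pattern with a made-up driver name, for readers new to it:

	/*
	 * Sketch only, "foo" is hypothetical: devm_drm_dev_alloc() allocates
	 * the containing structure, initializes the embedded drm_device and
	 * ties its lifetime to the parent struct device.
	 */
	struct foo_device {
		struct drm_device dev;	/* must match the last two macro args */
	};

	static int foo_probe(struct device *parent, const struct drm_driver *drv)
	{
		struct foo_device *foo;

		foo = devm_drm_dev_alloc(parent, drv, struct foo_device, dev);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		return drm_dev_register(&foo->dev, 0);
	}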
							
								
								
									
										231
									
								
								drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										231
									
								
								drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
									
									
									
									
									
										Normal file
									
								
							| 
						 | 
					@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Microsoft
 */

#include <linux/hyperv.h>

#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "hyperv_drm.h"

static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
				    const struct dma_buf_map *map,
				    struct drm_rect *rect)
{
	struct hyperv_drm_device *hv = to_hv(fb->dev);
	void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
	int idx;

	if (!drm_dev_enter(&hv->dev, &idx))
		return -ENODEV;

	drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect);
	drm_dev_exit(idx);

	return 0;
}

static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map)
{
	struct drm_rect fullscreen = {
		.x1 = 0,
		.x2 = fb->width,
		.y1 = 0,
		.y2 = fb->height,
	};
	return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
}

static int hyperv_connector_get_modes(struct drm_connector *connector)
{
	struct hyperv_drm_device *hv = to_hv(connector->dev);
	int count;

	count = drm_add_modes_noedid(connector,
				     connector->dev->mode_config.max_width,
				     connector->dev->mode_config.max_height);
	drm_set_preferred_mode(connector, hv->preferred_width,
			       hv->preferred_height);

	return count;
}

static const struct drm_connector_helper_funcs hyperv_connector_helper_funcs = {
	.get_modes = hyperv_connector_get_modes,
};

static const struct drm_connector_funcs hyperv_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static inline int hyperv_conn_init(struct hyperv_drm_device *hv)
{
	drm_connector_helper_add(&hv->connector, &hyperv_connector_helper_funcs);
	return drm_connector_init(&hv->dev, &hv->connector,
				  &hyperv_connector_funcs,
				  DRM_MODE_CONNECTOR_VIRTUAL);
}

static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
			     struct drm_framebuffer *fb)
{
	u32 pitch = w * (hv->screen_depth / 8);

	if (fb)
		pitch = fb->pitches[0];

	if (pitch * h > hv->fb_size)
		return -EINVAL;

	return 0;
}
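
hyperv_check_size() reduces to pitch × height ≤ fb_size, with the pitch defaulting to width × screen_depth/8 bytes when no framebuffer is given. A standalone sketch of the same arithmetic (plain C, with an assumed 8 MiB framebuffer matching SYNTHVID_FB_SIZE_WIN8 defined later in hyperv_drm_proto.c):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same check as hyperv_check_size(), outside the kernel. */
static int check_size(uint32_t w, uint32_t h, uint32_t depth_bits,
		      uint64_t fb_size)
{
	uint64_t pitch = (uint64_t)w * (depth_bits / 8);

	return pitch * h <= fb_size ? 0 : -1;
}

int main(void)
{
	/* 8 MiB framebuffer, 32 bpp: 1600x1200 fits, 1920x1200 does not. */
	assert(check_size(1600, 1200, 32, 8 * 1024 * 1024) == 0);
	assert(check_size(1920, 1200, 32, 8 * 1024 * 1024) != 0);
	printf("size checks ok\n");
	return 0;
}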

static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
			       struct drm_crtc_state *crtc_state,
			       struct drm_plane_state *plane_state)
{
	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);

	hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
				crtc_state->mode.hdisplay,
				crtc_state->mode.vdisplay,
				plane_state->fb->pitches[0]);
	hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->map[0]);
}

static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
			     struct drm_plane_state *plane_state,
			     struct drm_crtc_state *crtc_state)
{
	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb->format->format != DRM_FORMAT_XRGB8888)
		return -EINVAL;

	if (fb->pitches[0] * fb->height > hv->fb_size)
		return -EINVAL;

	return 0;
}

static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
			       struct drm_plane_state *old_state)
{
	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
	struct drm_rect rect;

	if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
		hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->map[0], &rect);
		hyperv_update_dirt(hv->hdev, &rect);
	}
}
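
drm_atomic_helper_damage_merged() hands back a single bounding box covering all damage clips between the old and new plane state, so one blit and one dirt message suffice per update. Conceptually the merge is a rect union; a minimal standalone sketch of that operation (plain C; rect_union() is a hypothetical helper, not the DRM helper's actual implementation):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Union of two rects: the smallest box containing both. */
static struct rect rect_union(struct rect a, struct rect b)
{
	struct rect r = {
		.x1 = a.x1 < b.x1 ? a.x1 : b.x1,
		.y1 = a.y1 < b.y1 ? a.y1 : b.y1,
		.x2 = a.x2 > b.x2 ? a.x2 : b.x2,
		.y2 = a.y2 > b.y2 ? a.y2 : b.y2,
	};
	return r;
}

int main(void)
{
	struct rect cursor = { 10, 10, 74, 74 };
	struct rect clock = { 1800, 0, 1920, 32 };
	struct rect merged = rect_union(cursor, clock);

	/* Two small damage clips merge into one wide bounding box. */
	printf("merged: (%d,%d)-(%d,%d)\n",
	       merged.x1, merged.y1, merged.x2, merged.y2);
	return 0;
}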

static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
	.enable = hyperv_pipe_enable,
	.check = hyperv_pipe_check,
	.update = hyperv_pipe_update,
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};

static const uint32_t hyperv_formats[] = {
	DRM_FORMAT_XRGB8888,
};

static const uint64_t hyperv_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
	int ret;

	ret = drm_simple_display_pipe_init(&hv->dev,
					   &hv->pipe,
					   &hyperv_pipe_funcs,
					   hyperv_formats,
					   ARRAY_SIZE(hyperv_formats),
					   NULL,
					   &hv->connector);
	if (ret)
		return ret;

	drm_plane_enable_fb_damage_clips(&hv->pipe.plane);

	return 0;
}

static enum drm_mode_status
hyperv_mode_valid(struct drm_device *dev,
		  const struct drm_display_mode *mode)
{
	struct hyperv_drm_device *hv = to_hv(dev);

	if (hyperv_check_size(hv, mode->hdisplay, mode->vdisplay, NULL))
		return MODE_BAD;

	return MODE_OK;
}

static const struct drm_mode_config_funcs hyperv_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.mode_valid = hyperv_mode_valid,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

int hyperv_mode_config_init(struct hyperv_drm_device *hv)
{
	struct drm_device *dev = &hv->dev;
	int ret;

	ret = drmm_mode_config_init(dev);
	if (ret) {
		drm_err(dev, "Failed to initialize mode setting.\n");
		return ret;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = hv->screen_width_max;
	dev->mode_config.max_height = hv->screen_height_max;

	dev->mode_config.preferred_depth = hv->screen_depth;
	dev->mode_config.prefer_shadow = 0;

	dev->mode_config.funcs = &hyperv_mode_config_funcs;

	ret = hyperv_conn_init(hv);
	if (ret) {
		drm_err(dev, "Failed to initialize connector.\n");
		return ret;
	}

	ret = hyperv_pipe_init(hv);
	if (ret) {
		drm_err(dev, "Failed to initialize pipe.\n");
		return ret;
	}

	drm_mode_config_reset(dev);

	return 0;
}

485	drivers/gpu/drm/hyperv/hyperv_drm_proto.c	Normal file
@@ -0,0 +1,485 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2021 Microsoft
 *
 * Portions of this code are derived from hyperv_fb.c
 */

#include <linux/hyperv.h>

#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>

#include "hyperv_drm.h"

#define VMBUS_RING_BUFSIZE (256 * 1024)
#define VMBUS_VSP_TIMEOUT (10 * HZ)

#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)

#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
#define SYNTHVID_WIDTH_MAX_WIN7 1600
#define SYNTHVID_HEIGHT_MAX_WIN7 1200
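
The version word packs the minor revision in the high 16 bits and the major in the low 16, so SYNTHVID_VERSION(3, 5) is 0x00050003. A standalone round-trip check of the macros (plain C, copied verbatim from the file):

#include <assert.h>
#include <stdint.h>

#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)

int main(void)
{
	uint32_t win10 = SYNTHVID_VERSION(3, 5);

	assert(win10 == 0x00050003);
	assert(SYNTHVID_VER_GET_MAJOR(win10) == 3);
	assert(SYNTHVID_VER_GET_MINOR(win10) == 5);
	/*
	 * Note a raw compare of packed words is not a version compare
	 * across majors (4.0 packs smaller than 3.1), which is why
	 * hyperv_version_ge() in this file unpacks the fields first.
	 */
	return 0;
}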

enum pipe_msg_type {
	PIPE_MSG_INVALID,
	PIPE_MSG_DATA,
	PIPE_MSG_MAX
};

enum synthvid_msg_type {
	SYNTHVID_ERROR			= 0,
	SYNTHVID_VERSION_REQUEST	= 1,
	SYNTHVID_VERSION_RESPONSE	= 2,
	SYNTHVID_VRAM_LOCATION		= 3,
	SYNTHVID_VRAM_LOCATION_ACK	= 4,
	SYNTHVID_SITUATION_UPDATE	= 5,
	SYNTHVID_SITUATION_UPDATE_ACK	= 6,
	SYNTHVID_POINTER_POSITION	= 7,
	SYNTHVID_POINTER_SHAPE		= 8,
	SYNTHVID_FEATURE_CHANGE		= 9,
	SYNTHVID_DIRT			= 10,
	SYNTHVID_RESOLUTION_REQUEST	= 13,
	SYNTHVID_RESOLUTION_RESPONSE	= 14,

	SYNTHVID_MAX			= 15
};

struct pipe_msg_hdr {
	u32 type;
	u32 size; /* size of message after this field */
} __packed;

struct hvd_screen_info {
	u16 width;
	u16 height;
} __packed;

struct synthvid_msg_hdr {
	u32 type;
	u32 size;  /* size of this header + payload after this field */
} __packed;

struct synthvid_version_req {
	u32 version;
} __packed;

struct synthvid_version_resp {
	u32 version;
	u8 is_accepted;
	u8 max_video_outputs;
} __packed;

struct synthvid_vram_location {
	u64 user_ctx;
	u8 is_vram_gpa_specified;
	u64 vram_gpa;
} __packed;

struct synthvid_vram_location_ack {
	u64 user_ctx;
} __packed;

struct video_output_situation {
	u8 active;
	u32 vram_offset;
	u8 depth_bits;
	u32 width_pixels;
	u32 height_pixels;
	u32 pitch_bytes;
} __packed;

struct synthvid_situation_update {
	u64 user_ctx;
	u8 video_output_count;
	struct video_output_situation video_output[1];
} __packed;

struct synthvid_situation_update_ack {
	u64 user_ctx;
} __packed;

struct synthvid_pointer_position {
	u8 is_visible;
	u8 video_output;
	s32 image_x;
	s32 image_y;
} __packed;

#define SYNTHVID_CURSOR_MAX_X 96
#define SYNTHVID_CURSOR_MAX_Y 96
#define SYNTHVID_CURSOR_ARGB_PIXEL_SIZE 4
#define SYNTHVID_CURSOR_MAX_SIZE (SYNTHVID_CURSOR_MAX_X * \
	SYNTHVID_CURSOR_MAX_Y * SYNTHVID_CURSOR_ARGB_PIXEL_SIZE)
#define SYNTHVID_CURSOR_COMPLETE (-1)

struct synthvid_pointer_shape {
	u8 part_idx;
	u8 is_argb;
	u32 width; /* SYNTHVID_CURSOR_MAX_X at most */
	u32 height; /* SYNTHVID_CURSOR_MAX_Y at most */
	u32 hot_x; /* hotspot relative to upper-left of pointer image */
	u32 hot_y;
	u8 data[4];
} __packed;

struct synthvid_feature_change {
	u8 is_dirt_needed;
	u8 is_ptr_pos_needed;
	u8 is_ptr_shape_needed;
	u8 is_situ_needed;
} __packed;

struct rect {
	s32 x1, y1; /* top left corner */
	s32 x2, y2; /* bottom right corner, exclusive */
} __packed;

struct synthvid_dirt {
	u8 video_output;
	u8 dirt_count;
	struct rect rect[1];
} __packed;

#define SYNTHVID_EDID_BLOCK_SIZE	128
#define SYNTHVID_MAX_RESOLUTION_COUNT	64

struct synthvid_supported_resolution_req {
	u8 maximum_resolution_count;
} __packed;

struct synthvid_supported_resolution_resp {
	u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
	u8 resolution_count;
	u8 default_resolution_index;
	u8 is_standard;
	struct hvd_screen_info supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
} __packed;

struct synthvid_msg {
	struct pipe_msg_hdr pipe_hdr;
	struct synthvid_msg_hdr vid_hdr;
	union {
		struct synthvid_version_req ver_req;
		struct synthvid_version_resp ver_resp;
		struct synthvid_vram_location vram;
		struct synthvid_vram_location_ack vram_ack;
		struct synthvid_situation_update situ;
		struct synthvid_situation_update_ack situ_ack;
		struct synthvid_pointer_position ptr_pos;
		struct synthvid_pointer_shape ptr_shape;
		struct synthvid_feature_change feature_chg;
		struct synthvid_dirt dirt;
		struct synthvid_supported_resolution_req resolution_req;
		struct synthvid_supported_resolution_resp resolution_resp;
	};
} __packed;
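
Every message goes on the ring as a pipe header followed by the SynthVid header and payload: vid_hdr.size counts the SynthVid header plus payload, and hyperv_sendpacket() below adds sizeof(struct pipe_msg_hdr) on top. A standalone sketch of the sizing for a version request (plain C; __attribute__((packed)) stands in for the kernel's __packed):

#include <assert.h>
#include <stdint.h>

struct pipe_msg_hdr {
	uint32_t type;
	uint32_t size; /* size of message after this field */
} __attribute__((packed));

struct synthvid_msg_hdr {
	uint32_t type;
	uint32_t size; /* this header + payload after this field */
} __attribute__((packed));

struct synthvid_version_req {
	uint32_t version;
} __attribute__((packed));

int main(void)
{
	/* vid_hdr.size for SYNTHVID_VERSION_REQUEST ... */
	uint32_t vid_size = sizeof(struct synthvid_msg_hdr) +
			    sizeof(struct synthvid_version_req);
	/* ...and the total handed to vmbus_sendpacket(). */
	uint32_t wire_size = vid_size + sizeof(struct pipe_msg_hdr);

	assert(vid_size == 12);
	assert(wire_size == 20);
	return 0;
}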

static inline bool hyperv_version_ge(u32 ver1, u32 ver2)
{
	if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
	    (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
	     SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
		return true;

	return false;
}

static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg *msg)
{
	static atomic64_t request_id = ATOMIC64_INIT(0);
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	int ret;

	msg->pipe_hdr.type = PIPE_MSG_DATA;
	msg->pipe_hdr.size = msg->vid_hdr.size;

	ret = vmbus_sendpacket(hdev->channel, msg,
			       msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
			       atomic64_inc_return(&request_id),
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		drm_err(&hv->dev, "Unable to send packet via vmbus\n");

	return ret;
}

static int hyperv_negotiate_version(struct hv_device *hdev, u32 ver)
{
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
	struct drm_device *dev = &hv->dev;
	unsigned long t;

	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_version_req);
	msg->ver_req.version = ver;
	hyperv_sendpacket(hdev, msg);

	t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
	if (!t) {
		drm_err(dev, "Timed out waiting for version response\n");
		return -ETIMEDOUT;
	}

	if (!msg->ver_resp.is_accepted) {
		drm_err(dev, "Version request not accepted\n");
		return -ENODEV;
	}

	hv->synthvid_version = ver;
	drm_info(dev, "Synthvid Version major %d, minor %d\n",
		 SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));

	return 0;
}

int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp)
{
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
	struct drm_device *dev = &hv->dev;
	unsigned long t;

	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_vram_location);
	msg->vram.user_ctx = vram_pp;
	msg->vram.vram_gpa = vram_pp;
	msg->vram.is_vram_gpa_specified = 1;
	hyperv_sendpacket(hdev, msg);

	t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
	if (!t) {
		drm_err(dev, "Timed out waiting for vram location ack\n");
		return -ETIMEDOUT;
	}
	if (msg->vram_ack.user_ctx != vram_pp) {
		drm_err(dev, "Unable to set VRAM location\n");
		return -ENODEV;
	}

	return 0;
}

int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
			    u32 w, u32 h, u32 pitch)
{
	struct synthvid_msg msg;

	memset(&msg, 0, sizeof(struct synthvid_msg));

	msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_situation_update);
	msg.situ.user_ctx = 0;
	msg.situ.video_output_count = 1;
	msg.situ.video_output[0].active = active;
	/* vram_offset should always be 0 */
	msg.situ.video_output[0].vram_offset = 0;
	msg.situ.video_output[0].depth_bits = bpp;
	msg.situ.video_output[0].width_pixels = w;
	msg.situ.video_output[0].height_pixels = h;
	msg.situ.video_output[0].pitch_bytes = pitch;

	hyperv_sendpacket(hdev, &msg);

	return 0;
}

int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
{
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct synthvid_msg msg;

	if (!hv->dirt_needed)
		return 0;

	memset(&msg, 0, sizeof(struct synthvid_msg));

	msg.vid_hdr.type = SYNTHVID_DIRT;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_dirt);
	msg.dirt.video_output = 0;
	msg.dirt.dirt_count = 1;
	msg.dirt.rect[0].x1 = rect->x1;
	msg.dirt.rect[0].y1 = rect->y1;
	msg.dirt.rect[0].x2 = rect->x2;
	msg.dirt.rect[0].y2 = rect->y2;

	hyperv_sendpacket(hdev, &msg);

	return 0;
}
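
struct synthvid_dirt declares rect[1] as an old-style trailing array: a message carrying N dirty rects would be sized as the base struct plus N-1 extra rects, with dirt_count set to N. hyperv_update_dirt() always sends exactly one; the multi-rect sizing below is an illustrative sketch of the layout only (plain C, not something the driver does):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct rect {
	int32_t x1, y1; /* top left corner */
	int32_t x2, y2; /* bottom right corner, exclusive */
} __attribute__((packed));

struct synthvid_dirt {
	uint8_t video_output;
	uint8_t dirt_count;
	struct rect rect[1]; /* trailing array, dirt_count entries */
} __attribute__((packed));

int main(void)
{
	unsigned int n = 4;
	size_t sz = sizeof(struct synthvid_dirt) +
		    (n - 1) * sizeof(struct rect);
	struct synthvid_dirt *dirt = calloc(1, sz);

	assert(dirt);
	dirt->dirt_count = n;
	/* 2 + 16 bytes of base struct, plus 3 more 16-byte rects. */
	assert(sz == 18 + 48);
	free(dirt);
	return 0;
}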

static int hyperv_get_supported_resolution(struct hv_device *hdev)
{
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
	struct drm_device *dev = &hv->dev;
	unsigned long t;
	u8 index;
	int i;

	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_supported_resolution_req);
	msg->resolution_req.maximum_resolution_count =
		SYNTHVID_MAX_RESOLUTION_COUNT;
	hyperv_sendpacket(hdev, msg);

	t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
	if (!t) {
		drm_err(dev, "Timed out waiting for resolution response\n");
		return -ETIMEDOUT;
	}

	if (msg->resolution_resp.resolution_count == 0) {
		drm_err(dev, "No supported resolutions\n");
		return -ENODEV;
	}

	index = msg->resolution_resp.default_resolution_index;
	if (index >= msg->resolution_resp.resolution_count) {
		drm_err(dev, "Invalid resolution index: %d\n", index);
		return -ENODEV;
	}

	for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
		hv->screen_width_max = max_t(u32, hv->screen_width_max,
			msg->resolution_resp.supported_resolution[i].width);
		hv->screen_height_max = max_t(u32, hv->screen_height_max,
			msg->resolution_resp.supported_resolution[i].height);
	}

	hv->preferred_width =
		msg->resolution_resp.supported_resolution[index].width;
	hv->preferred_height =
		msg->resolution_resp.supported_resolution[index].height;

	return 0;
}

static void hyperv_receive_sub(struct hv_device *hdev)
{
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct synthvid_msg *msg;

	if (!hv)
		return;

	msg = (struct synthvid_msg *)hv->recv_buf;

	/* Complete the wait event */
	if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
	    msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
	    msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
		memcpy(hv->init_buf, msg, VMBUS_MAX_PACKET_SIZE);
		complete(&hv->wait);
		return;
	}

	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
		hv->dirt_needed = msg->feature_chg.is_dirt_needed;
}

static void hyperv_receive(void *ctx)
{
	struct hv_device *hdev = ctx;
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct synthvid_msg *recv_buf;
	u32 bytes_recvd;
	u64 req_id;
	int ret;

	if (!hv)
		return;

	recv_buf = (struct synthvid_msg *)hv->recv_buf;

	do {
		ret = vmbus_recvpacket(hdev->channel, recv_buf,
				       VMBUS_MAX_PACKET_SIZE,
				       &bytes_recvd, &req_id);
		if (bytes_recvd > 0 &&
		    recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
			hyperv_receive_sub(hdev);
	} while (bytes_recvd > 0 && ret == 0);
}

int hyperv_connect_vsp(struct hv_device *hdev)
{
	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
	struct drm_device *dev = &hv->dev;
	int ret;

	ret = vmbus_open(hdev->channel, VMBUS_RING_BUFSIZE, VMBUS_RING_BUFSIZE,
			 NULL, 0, hyperv_receive, hdev);
	if (ret) {
		drm_err(dev, "Unable to open vmbus channel\n");
		return ret;
	}

	/* Negotiate the protocol version with host */
	switch (vmbus_proto_version) {
	case VERSION_WIN10:
	case VERSION_WIN10_V5:
		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
		if (!ret)
			break;
		fallthrough;
	case VERSION_WIN8:
	case VERSION_WIN8_1:
		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
		if (!ret)
			break;
		fallthrough;
	case VERSION_WS2008:
	case VERSION_WIN7:
		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7);
		break;
	default:
		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
		break;
	}

	if (ret) {
		drm_err(dev, "Synthetic video device version not accepted %d\n", ret);
		goto error;
	}

	if (hv->synthvid_version == SYNTHVID_VERSION_WIN7)
		hv->screen_depth = SYNTHVID_DEPTH_WIN7;
	else
		hv->screen_depth = SYNTHVID_DEPTH_WIN8;

	if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
		ret = hyperv_get_supported_resolution(hdev);
		if (ret)
			drm_err(dev, "Failed to get supported resolution from host, using defaults\n");
	} else {
		hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7;
		hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7;
	}

	hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;

	return 0;

error:
	vmbus_close(hdev->channel);
	return ret;
}
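
The switch in hyperv_connect_vsp() is a fall-through ladder: starting from the newest SynthVid protocol the host's VMBus version suggests, each rejected version drops to the next older one. A minimal standalone model of that strategy (plain C; host_accepts() is a hypothetical stand-in for the real handshake):

#include <stdbool.h>
#include <stdio.h>

static const unsigned int candidates[] = {
	0x00050003, /* SYNTHVID_VERSION_WIN10 (3.5) */
	0x00020003, /* SYNTHVID_VERSION_WIN8  (3.2) */
	0x00000003, /* SYNTHVID_VERSION_WIN7  (3.0) */
};

/* Stand-in for the host's version response; this host only takes 3.2. */
static bool host_accepts(unsigned int ver)
{
	return ver == 0x00020003;
}

int main(void)
{
	for (unsigned int i = 0; i < 3; i++) {
		if (host_accepts(candidates[i])) {
			printf("negotiated 0x%08x\n", candidates[i]);
			return 0;
		}
	}
	fprintf(stderr, "no common version\n");
	return 1;
}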

@@ -11040,7 +11040,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 	if (ret < 0)
 		goto unpin_fb;
 
-	fence = dma_resv_get_excl_rcu(obj->base.resv);
+	fence = dma_resv_get_excl_unlocked(obj->base.resv);
 	if (fence) {
 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 					   fence);

@@ -10,7 +10,7 @@
 void dma_resv_prune(struct dma_resv *resv)
 {
 	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
+		if (dma_resv_test_signaled(resv, true))
 			dma_resv_add_excl_fence(resv, NULL);
 		dma_resv_unlock(resv);
 	}
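
The i915 hunks from here down are mechanical fallout of the dma_resv rename: dma_resv_get_excl_rcu() becomes dma_resv_get_excl_unlocked(), dma_resv_get_fences_rcu() becomes dma_resv_get_fences(), dma_resv_test_signaled_rcu() becomes dma_resv_test_signaled(), dma_resv_wait_timeout_rcu() becomes dma_resv_wait_timeout(), and open-coded rcu_dereference() of resv->fence and resv->fence_excl gives way to the dma_resv_shared_list() and dma_resv_excl_fence() accessors.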

@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */

@@ -113,11 +113,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	seq = raw_read_seqcount(&obj->base.resv->seq);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy =
-		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
 
 	/* Translate shared fences to READ set of engines */
-	list = rcu_dereference(obj->base.resv->fence);
+	list = dma_resv_shared_list(obj->base.resv);
 	if (list) {
 		unsigned int shared_count = list->shared_count, i;

@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;
 
-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }
 
 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)

@@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	struct dma_fence *fence;
 
 	rcu_read_lock();
-	fence = dma_resv_get_excl_rcu(obj->base.resv);
+	fence = dma_resv_get_excl_unlocked(obj->base.resv);
 	rcu_read_unlock();
 
 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))

@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;
 
 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(obj->base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;

@@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		 */
 		prune_fences = count && timeout >= 0;
 	} else {
-		excl = dma_resv_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_unlocked(resv);
 	}
 
 	if (excl && timeout >= 0)

@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;

@@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_unlocked(obj->base.resv);
 	}
 
 	if (excl) {

@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;

@@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_unlocked(obj->base.resv);
 	}
 
 	if (excl) {

@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;

@@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_unlocked(resv);
 	}
 
 	if (ret >= 0 && excl && excl->ops != exclude) {

@@ -30,9 +30,8 @@ struct mtk_disp_color_data {
 	unsigned int color_offset;
 };
 
-/**
+/*
  * struct mtk_disp_color - DISP_COLOR driver structure
- * @ddp_comp: structure containing type enum and hardware resources
  * @crtc: associated crtc to report irq events to
  * @data: platform colour driver data
  */

@@ -29,10 +29,8 @@ struct mtk_disp_gamma_data {
 	bool has_dither;
 };
 
-/**
+/*
  * struct mtk_disp_gamma - DISP_GAMMA driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
  */
 struct mtk_disp_gamma {
 	struct clk *clk;

@@ -66,9 +66,8 @@ struct mtk_disp_ovl_data {
 	bool smi_id_en;
 };
 
-/**
+/*
  * struct mtk_disp_ovl - DISP_OVL driver structure
- * @ddp_comp: structure containing type enum and hardware resources
  * @crtc: associated crtc to report vblank events to
  * @data: platform data
  */

@@ -55,10 +55,8 @@ struct mtk_disp_rdma_data {
 	unsigned int fifo_size;
 };
 
-/**
+/*
  * struct mtk_disp_rdma - DISP_RDMA driver structure
- * @ddp_comp: structure containing type enum and hardware resources
- * @crtc: associated crtc to report irq events to
  * @data: local driver data
  */
 struct mtk_disp_rdma {

@@ -7,6 +7,8 @@ config DRM_MSM
 	depends on IOMMU_SUPPORT
 	depends on OF && COMMON_CLK
 	depends on QCOM_OCMEM || QCOM_OCMEM=n
+	depends on QCOM_LLCC || QCOM_LLCC=n
+	depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
 	select IOMMU_IO_PGTABLE
 	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR

@@ -15,7 +17,6 @@ config DRM_MSM
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM if ARCH_QCOM
-	select QCOM_COMMAND_DB if ARCH_QCOM
 	select WANT_DEV_COREDUMP
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
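
The two added dependencies use the `FOO || FOO=n` Kconfig idiom: DRM_MSM may be enabled whether or not QCOM_LLCC and QCOM_COMMAND_DB are, but if either is built as a module (=m), DRM_MSM is limited to =m as well, so it can never be built-in while linking against modular symbols. The second hunk drops the old unconditional `select QCOM_COMMAND_DB if ARCH_QCOM` in favour of that dependency.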

@@ -817,9 +817,9 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = dma_resv_get_list(obj->resv);
+	fobj = dma_resv_shared_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = dma_resv_get_excl(obj->resv);
+		fence = dma_resv_excl_fence(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);

@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
-					  true,  remain);
+	ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)

@@ -1025,7 +1024,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 	}
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared_list(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;

@@ -1035,7 +1034,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 		}
 	}
 
-	fence = rcu_dereference(robj->fence_excl);
+	fence = dma_resv_excl_fence(robj);
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();

@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 			asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+	asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
 	asyw->image.offset[0] = nvbo->offset;
 
 	if (wndw->func->prepare) {

@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 					NOUVEAU_GEM_DOMAIN_GART;
 	else
-	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
| 
						 | 
					@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 | 
				
			||||||
	if (nvbo->bo.pin_count) {
 | 
						if (nvbo->bo.pin_count) {
 | 
				
			||||||
		bool error = evict;
 | 
							bool error = evict;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		switch (bo->mem.mem_type) {
 | 
							switch (bo->resource->mem_type) {
 | 
				
			||||||
		case TTM_PL_VRAM:
 | 
							case TTM_PL_VRAM:
 | 
				
			||||||
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 | 
								error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 | 
				
			||||||
			break;
 | 
								break;
 | 
				
			||||||
| 
						 | 
					@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 | 
				
			||||||
		if (error) {
 | 
							if (error) {
 | 
				
			||||||
			NV_ERROR(drm, "bo %p pinned elsewhere: "
 | 
								NV_ERROR(drm, "bo %p pinned elsewhere: "
 | 
				
			||||||
				      "0x%08x vs 0x%08x\n", bo,
 | 
									      "0x%08x vs 0x%08x\n", bo,
 | 
				
			||||||
				 bo->mem.mem_type, domain);
 | 
									 bo->resource->mem_type, domain);
 | 
				
			||||||
			ret = -EBUSY;
 | 
								ret = -EBUSY;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		ttm_bo_pin(&nvbo->bo);
 | 
							ttm_bo_pin(&nvbo->bo);
 | 
				
			||||||
| 
						 | 
					@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ttm_bo_pin(&nvbo->bo);
 | 
						ttm_bo_pin(&nvbo->bo);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	switch (bo->mem.mem_type) {
 | 
						switch (bo->resource->mem_type) {
 | 
				
			||||||
	case TTM_PL_VRAM:
 | 
						case TTM_PL_VRAM:
 | 
				
			||||||
		drm->gem.vram_available -= bo->base.size;
 | 
							drm->gem.vram_available -= bo->base.size;
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
| 
						 | 
					@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ttm_bo_unpin(&nvbo->bo);
 | 
						ttm_bo_unpin(&nvbo->bo);
 | 
				
			||||||
	if (!nvbo->bo.pin_count) {
 | 
						if (!nvbo->bo.pin_count) {
 | 
				
			||||||
		switch (bo->mem.mem_type) {
 | 
							switch (bo->resource->mem_type) {
 | 
				
			||||||
		case TTM_PL_VRAM:
 | 
							case TTM_PL_VRAM:
 | 
				
			||||||
			drm->gem.vram_available += bo->base.size;
 | 
								drm->gem.vram_available += bo->base.size;
 | 
				
			||||||
			break;
 | 
								break;
 | 
				
			||||||
| 
						 | 
					@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 | 
				
			||||||
	if (ret)
 | 
						if (ret)
 | 
				
			||||||
		return ret;
 | 
							return ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
 | 
						ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ttm_bo_unreserve(&nvbo->bo);
 | 
						ttm_bo_unreserve(&nvbo->bo);
 | 
				
			||||||
	return ret;
 | 
						return ret;
 | 
				
			||||||
| 
						 | 
					@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct nouveau_bo *nvbo = nouveau_bo(bo);
 | 
						struct nouveau_bo *nvbo = nouveau_bo(bo);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	switch (bo->mem.mem_type) {
 | 
						switch (bo->resource->mem_type) {
 | 
				
			||||||
	case TTM_PL_VRAM:
 | 
						case TTM_PL_VRAM:
 | 
				
			||||||
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
 | 
							nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
 | 
				
			||||||
					 NOUVEAU_GEM_DOMAIN_CPU);
 | 
										 NOUVEAU_GEM_DOMAIN_CPU);
 | 
				
			||||||
| 
						 | 
					@ -754,7 +754,7 @@ static int
 | 
				
			||||||
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 | 
					nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 | 
				
			||||||
		     struct ttm_resource *reg)
 | 
							     struct ttm_resource *reg)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
 | 
						struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
 | 
				
			||||||
	struct nouveau_mem *new_mem = nouveau_mem(reg);
 | 
						struct nouveau_mem *new_mem = nouveau_mem(reg);
 | 
				
			||||||
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 | 
						struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 | 
				
			||||||
	int ret;
 | 
						int ret;
 | 
				
			||||||
| 
						 | 
					@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 | 
				
			||||||
		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 | 
							mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 | 
				
			||||||
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 | 
						ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 | 
				
			||||||
	if (ret == 0) {
 | 
						if (ret == 0) {
 | 
				
			||||||
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
 | 
							ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
 | 
				
			||||||
		if (ret == 0) {
 | 
							if (ret == 0) {
 | 
				
			||||||
			ret = nouveau_fence_new(chan, false, &fence);
 | 
								ret = nouveau_fence_new(chan, false, &fence);
 | 
				
			||||||
			if (ret == 0) {
 | 
								if (ret == 0) {
 | 
				
			||||||
| 
						 | 
					@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (new_reg) {
 | 
						if (new_reg)
 | 
				
			||||||
		if (new_reg->mm_node)
 | 
							nvbo->offset = (new_reg->start << PAGE_SHIFT);
 | 
				
			||||||
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
 | 
					 | 
				
			||||||
		else
 | 
					 | 
				
			||||||
			nvbo->offset = 0;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -955,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 | 
						struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 | 
				
			||||||
	struct drm_device *dev = drm->dev;
 | 
						struct drm_device *dev = drm->dev;
 | 
				
			||||||
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
 | 
						struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	nv10_bo_put_tile_region(dev, *old_tile, fence);
 | 
						nv10_bo_put_tile_region(dev, *old_tile, fence);
 | 
				
			||||||
	*old_tile = new_tile;
 | 
						*old_tile = new_tile;
 | 
				
			||||||
| 
						 | 
					@ -969,7 +965,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 | 
						struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 | 
				
			||||||
	struct nouveau_bo *nvbo = nouveau_bo(bo);
 | 
						struct nouveau_bo *nvbo = nouveau_bo(bo);
 | 
				
			||||||
	struct ttm_resource *old_reg = &bo->mem;
 | 
						struct ttm_resource *old_reg = bo->resource;
 | 
				
			||||||
	struct nouveau_drm_tile *new_tile = NULL;
 | 
						struct nouveau_drm_tile *new_tile = NULL;
 | 
				
			||||||
	int ret = 0;
 | 
						int ret = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1009,7 +1005,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 | 
				
			||||||
	if (old_reg->mem_type == TTM_PL_TT &&
 | 
						if (old_reg->mem_type == TTM_PL_TT &&
 | 
				
			||||||
	    new_reg->mem_type == TTM_PL_SYSTEM) {
 | 
						    new_reg->mem_type == TTM_PL_SYSTEM) {
 | 
				
			||||||
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
 | 
							nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
 | 
				
			||||||
		ttm_resource_free(bo, &bo->mem);
 | 
							ttm_resource_free(bo, &bo->resource);
 | 
				
			||||||
		ttm_bo_assign_mem(bo, new_reg);
 | 
							ttm_bo_assign_mem(bo, new_reg);
 | 
				
			||||||
		goto out;
 | 
							goto out;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
| 
						 | 
					@ -1045,7 +1041,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
out_ntfy:
 | 
					out_ntfy:
 | 
				
			||||||
	if (ret) {
 | 
						if (ret) {
 | 
				
			||||||
		nouveau_bo_move_ntfy(bo, &bo->mem);
 | 
							nouveau_bo_move_ntfy(bo, bo->resource);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return ret;
 | 
						return ret;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					@ -1170,7 +1166,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
 | 
				
			||||||
			list_del_init(&nvbo->io_reserve_lru);
 | 
								list_del_init(&nvbo->io_reserve_lru);
 | 
				
			||||||
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 | 
								drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 | 
				
			||||||
					   bdev->dev_mapping);
 | 
										   bdev->dev_mapping);
 | 
				
			||||||
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
 | 
								nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
 | 
				
			||||||
			goto retry;
 | 
								goto retry;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1200,12 +1196,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 | 
				
			||||||
	/* as long as the bo isn't in vram, and isn't tiled, we've got
 | 
						/* as long as the bo isn't in vram, and isn't tiled, we've got
 | 
				
			||||||
	 * nothing to do here.
 | 
						 * nothing to do here.
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	if (bo->mem.mem_type != TTM_PL_VRAM) {
 | 
						if (bo->resource->mem_type != TTM_PL_VRAM) {
 | 
				
			||||||
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 | 
							if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 | 
				
			||||||
		    !nvbo->kind)
 | 
							    !nvbo->kind)
 | 
				
			||||||
			return 0;
 | 
								return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if (bo->mem.mem_type != TTM_PL_SYSTEM)
 | 
							if (bo->resource->mem_type != TTM_PL_SYSTEM)
 | 
				
			||||||
			return 0;
 | 
								return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
 | 
							nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
 | 
				
			||||||
| 
						 | 
					@ -1213,7 +1209,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 | 
				
			||||||
	} else {
 | 
						} else {
 | 
				
			||||||
		/* make sure bo is in mappable vram */
 | 
							/* make sure bo is in mappable vram */
 | 
				
			||||||
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
 | 
							if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
 | 
				
			||||||
		    bo->mem.start + bo->mem.num_pages < mappable)
 | 
							    bo->resource->start + bo->resource->num_pages < mappable)
 | 
				
			||||||
			return 0;
 | 
								return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
 | 
							for (i = 0; i < nvbo->placement.num_placement; ++i) {
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
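Most of the nouveau churn above and below is mechanical: struct ttm_buffer_object no longer embeds its placement as bo->mem but carries a pointer to a manager-allocated struct ttm_resource. An illustrative sketch of the access pattern, not taken from any one file:

#include <drm/ttm/ttm_bo_api.h>

/* The same query, before and after the rework. */
static u32 bo_mem_type(struct ttm_buffer_object *bo)
{
	/* was: return bo->mem.mem_type; */
	return bo->resource->mem_type;
}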
@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		args.start = 0;
 		args.limit = chan->vmm->vmm.limit - 1;
 	} else
-	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+	if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
 		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
 			/* nv04 vram pushbuf hack, retarget to its location in
 			 * the framebuffer bar rather than direct vram access..

@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 			      FBINFO_HWACCEL_FILLRECT |
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = nvbo->bo.mem.bus.offset;
+	info->fix.smem_start = nvbo->bo.resource->bus.offset;
 	info->fix.smem_len = nvbo->bo.base.size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);

@@ -355,8 +355,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 			return ret;
 	}
 
-	fobj = dma_resv_get_list(resv);
-	fence = dma_resv_get_excl(resv);
+	fobj = dma_resv_shared_list(resv);
+	fence = dma_resv_excl_fence(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;

@@ -276,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 
 	if (is_power_of_2(nvbo->valid_domains))
 		rep->domain = nvbo->valid_domains;
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

@@ -347,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 
 	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-	    bo->mem.mem_type == TTM_PL_VRAM)
+	    bo->resource->mem_type == TTM_PL_VRAM)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
 	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		 bo->mem.mem_type == TTM_PL_TT)
+		 bo->resource->mem_type == TTM_PL_TT)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
 	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)

@@ -561,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 			if (nvbo->offset == b->presumed.offset &&
-			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 				continue;
 
-			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 			else
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;

@@ -681,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		}
 
 		if (!nvbo->kmap.virtual) {
-			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
 					  &nvbo->kmap);
 			if (ret) {
 				NV_PRINTK(err, cli, "failed kmap for reloc\n");

@@ -870,7 +870,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			if (unlikely(cmd != req->suffix0)) {
 				if (!nvbo->kmap.virtual) {
 					ret = ttm_bo_kmap(&nvbo->bo, 0,
-							  nvbo->bo.mem.
+							  nvbo->bo.resource->
 							  num_pages,
 							  &nvbo->kmap);
 					if (ret) {

@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
-					 no_wait ? 0 : 30 * HZ);
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
 	else if (lret > 0)

@@ -178,25 +178,24 @@ void
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
-	if (!mem)
-		return;
+
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
 
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
 
-	reg->mm_node = mem;
+	*res = &mem->base;
 	return 0;
 }

@@ -6,13 +6,8 @@ struct ttm_tt;
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
+	struct ttm_resource base;
 	struct nouveau_cli *cli;
 	u8 kind;
 	u8 comp;

@@ -20,8 +15,14 @@ struct nouveau_mem {
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
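The nouveau_mem rework above replaces the untyped reg->mm_node back-pointer with proper subclassing: the driver type embeds struct ttm_resource as its first member and downcasts with container_of(), which is also why nouveau_mem_del() can drop its NULL check. The generic shape of the idiom, using a hypothetical driver type:

#include <linux/kernel.h>
#include <drm/ttm/ttm_resource.h>

struct my_mem {
	struct ttm_resource base;	/* embedded base, not a void * */
	u64 cookie;			/* hypothetical private state */
};

static inline struct my_mem *to_my_mem(struct ttm_resource *res)
{
	/* valid for every resource this driver allocated itself */
	return container_of(res, struct my_mem, base);
}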
@@ -26,6 +26,8 @@
 #include <linux/limits.h>
 #include <linux/swiotlb.h>
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"

@@ -43,7 +45,7 @@ static int
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

@@ -52,13 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ttm_resource_init(bo, place, *res);
+
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 

@@ -74,17 +78,18 @@ static int
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
 

@@ -97,26 +102,27 @@ static int
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
 
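All three manager callbacks above converge on the same contract: allocate the driver's resource subclass, initialize the embedded base with ttm_resource_init(), and return it through the new struct ttm_resource ** out-parameter. A condensed sketch reusing the hypothetical my_mem type from earlier, with the real backing-store work elided:

static int my_manager_new(struct ttm_resource_manager *man,
			  struct ttm_buffer_object *bo,
			  const struct ttm_place *place,
			  struct ttm_resource **res)
{
	struct my_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);

	if (!mem)
		return -ENOMEM;

	*res = &mem->base;
	ttm_resource_init(bo, place, *res);
	/* ...then back *res with actual pages or VRAM... */
	return 0;
}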
@@ -77,7 +77,7 @@ int
 nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 		struct nouveau_vma **pvma)
 {
-	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+	struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
 	struct nouveau_vma *vma;
 	struct nvif_vma tmp;
 	int ret;

@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 	vma->fence = NULL;
 	list_add_tail(&vma->head, &nvbo->vma_list);
 
-	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
 		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
 				   mem->mem.size, &tmp);

@@ -77,8 +77,8 @@ static int
 nv17_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
+	struct ttm_resource *reg = priv->bo->bo.resource;
 	struct nv10_fence_chan *fctx;
-	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
 	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret = 0;

@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_resource *reg = &priv->bo->bo.mem;
+	struct ttm_resource *reg = priv->bo->bo.resource;
 	u32 start = reg->start * PAGE_SIZE;
 	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret;

@@ -46,7 +46,7 @@ tu102_mc_intr_update(struct tu102_mc *mc)
 		nvkm_wr32(device, 0xb81610, 0x6);
 }
 
-void
+static void
 tu102_mc_intr_unarm(struct nvkm_mc *base)
 {
 	struct tu102_mc *mc = tu102_mc(base);

@@ -58,7 +58,7 @@ tu102_mc_intr_unarm(struct nvkm_mc *base)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
-void
+static void
 tu102_mc_intr_rearm(struct nvkm_mc *base)
 {
 	struct tu102_mc *mc = tu102_mc(base);

@@ -70,7 +70,7 @@ tu102_mc_intr_rearm(struct nvkm_mc *base)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
-void
+static void
 tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
 {
 	struct tu102_mc *mc = tu102_mc(base);

@@ -42,6 +42,7 @@ struct kd35t133 {
 	struct gpio_desc *reset_gpio;
 	struct regulator *vdd;
 	struct regulator *iovcc;
+	enum drm_panel_orientation orientation;
 	bool prepared;
 };
 

@@ -216,6 +217,7 @@ static int kd35t133_get_modes(struct drm_panel *panel,
 	connector->display_info.width_mm = mode->width_mm;
 	connector->display_info.height_mm = mode->height_mm;
 	drm_mode_probed_add(connector, mode);
+	drm_connector_set_panel_orientation(connector, ctx->orientation);
 
 	return 1;
 }

@@ -258,6 +260,12 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
 		return ret;
 	}
 
+	ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+	if (ret < 0) {
+		dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+		return ret;
+	}
+
 	mipi_dsi_set_drvdata(dsi, ctx);
 
 	ctx->dev = dev;
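The kd35t133 hunks wire panel rotation through three layers. Roughly, using the names from the hunks above (the DT property comes from the common panel binding):

/*
 * DT:          rotation = <0>, <90>, <180> or <270> on the panel node
 * probe():     of_drm_get_panel_orientation() caches it in ctx->orientation
 * get_modes(): drm_connector_set_panel_orientation() reports it per-connector
 */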
@@ -29,7 +29,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-/**
+/*
  * Raspberry Pi 7" touchscreen panel driver.
  *
  * The 7" touchscreen consists of a DPI LCD panel, a Toshiba

@@ -38,7 +38,7 @@
 #define DSI_CMD2_BK1_SPD2		0xC2 /* Source EQ2 Setting */
 #define DSI_CMD2_BK1_MIPISET1		0xD0 /* MIPI Setting 1 */
 
-/**
+/*
  * Command2 with BK function selection.
  *
  * BIT[4, 0]: [CN2, BKXSEL]
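The two comment hunks above are kernel-doc fixes rather than wording changes: a /** opener marks a block for scripts/kernel-doc, which warns when such a block does not document a symbol, so free-form comments open with a plain /*:

/**
 * my_func() - two stars: parsed as kernel-doc, must follow its format
 */

/* one star: an ordinary comment, left alone by the tooling */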
					
 | 
				
			||||||
| 
						 | 
					@ -45,6 +45,7 @@ struct panfrost_features {
 | 
				
			||||||
	u32 thread_max_workgroup_sz;
 | 
						u32 thread_max_workgroup_sz;
 | 
				
			||||||
	u32 thread_max_barrier_sz;
 | 
						u32 thread_max_barrier_sz;
 | 
				
			||||||
	u32 coherency_features;
 | 
						u32 coherency_features;
 | 
				
			||||||
 | 
						u32 afbc_features;
 | 
				
			||||||
	u32 texture_features[4];
 | 
						u32 texture_features[4];
 | 
				
			||||||
	u32 js_features[16];
 | 
						u32 js_features[16];
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -63,6 +63,7 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 | 
				
			||||||
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
 | 
							PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
 | 
				
			||||||
				thread_max_barrier_sz);
 | 
									thread_max_barrier_sz);
 | 
				
			||||||
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
 | 
							PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
 | 
				
			||||||
 | 
							PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
 | 
				
			||||||
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
 | 
							PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
 | 
				
			||||||
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
 | 
							PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
 | 
				
			||||||
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
 | 
							PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
 | 
				
			||||||
| 
						 | 
					@ -311,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 | 
				
			||||||
	if (!gem_obj)
 | 
						if (!gem_obj)
 | 
				
			||||||
		return -ENOENT;
 | 
							return -ENOENT;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
 | 
						ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
 | 
				
			||||||
						  true, timeout);
 | 
					 | 
				
			||||||
	if (!ret)
 | 
						if (!ret)
 | 
				
			||||||
		ret = timeout ? -ETIMEDOUT : -EBUSY;
 | 
							ret = timeout ? -ETIMEDOUT : -EBUSY;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -547,6 +547,7 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
 | 
				
			||||||
 * Panfrost driver version:
 | 
					 * Panfrost driver version:
 | 
				
			||||||
 * - 1.0 - initial interface
 | 
					 * - 1.0 - initial interface
 | 
				
			||||||
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 | 
					 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 | 
				
			||||||
 | 
					 * - 1.2 - adds AFBC_FEATURES query
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
static const struct drm_driver panfrost_drm_driver = {
 | 
					static const struct drm_driver panfrost_drm_driver = {
 | 
				
			||||||
	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
 | 
						.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
 | 
				
			||||||
| 
						 | 
					@ -559,7 +560,7 @@ static const struct drm_driver panfrost_drm_driver = {
 | 
				
			||||||
	.desc			= "panfrost DRM",
 | 
						.desc			= "panfrost DRM",
 | 
				
			||||||
	.date			= "20180908",
 | 
						.date			= "20180908",
 | 
				
			||||||
	.major			= 1,
 | 
						.major			= 1,
 | 
				
			||||||
	.minor			= 1,
 | 
						.minor			= 2,
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	.gem_create_object	= panfrost_gem_create_object,
 | 
						.gem_create_object	= panfrost_gem_create_object,
 | 
				
			||||||
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 | 
						.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -228,6 +228,7 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
 | 
				
			||||||
	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
 | 
						pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
 | 
				
			||||||
	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
 | 
						pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
 | 
				
			||||||
	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
 | 
						pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
 | 
				
			||||||
 | 
						pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
 | 
				
			||||||
	for (i = 0; i < 4; i++)
 | 
						for (i = 0; i < 4; i++)
 | 
				
			||||||
		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
 | 
							pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
 | 
				
			||||||
	int i;
 | 
						int i;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	for (i = 0; i < bo_count; i++)
 | 
						for (i = 0; i < bo_count; i++)
 | 
				
			||||||
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
 | 
							implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
 | 
					static void panfrost_attach_object_fences(struct drm_gem_object **bos,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -82,6 +82,7 @@
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define GPU_TEXTURE_FEATURES(n)		(0x0B0 + ((n) * 4))
 | 
					#define GPU_TEXTURE_FEATURES(n)		(0x0B0 + ((n) * 4))
 | 
				
			||||||
#define GPU_JS_FEATURES(n)		(0x0C0 + ((n) * 4))
 | 
					#define GPU_JS_FEATURES(n)		(0x0C0 + ((n) * 4))
 | 
				
			||||||
 | 
					#define GPU_AFBC_FEATURES		(0x4C)	/* (RO) AFBC support on Bifrost */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define GPU_SHADER_PRESENT_LO		0x100	/* (RO) Shader core present bitmap, low word */
 | 
					#define GPU_SHADER_PRESENT_LO		0x100	/* (RO) Shader core present bitmap, low word */
 | 
				
			||||||
#define GPU_SHADER_PRESENT_HI		0x104	/* (RO) Shader core present bitmap, high word */
 | 
					#define GPU_SHADER_PRESENT_HI		0x104	/* (RO) Shader core present bitmap, high word */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
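With the bump to driver version 1.2, userspace can probe the new word through the existing GET_PARAM ioctl. A hedged userspace sketch: it assumes the uAPI constant is named DRM_PANFROST_PARAM_AFBC_FEATURES, matching the PANFROST_FEATURE(AFBC_FEATURES, ...) case above, and elides error handling:

#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

static __u64 query_afbc_features(int drm_fd)
{
	struct drm_panfrost_get_param req = {
		.param = DRM_PANFROST_PARAM_AFBC_FEATURES,	/* assumed name */
	};

	if (ioctl(drm_fd, DRM_IOCTL_PANFROST_GET_PARAM, &req))
		return 0;	/* older kernels reject unknown params */
	return req.value;
}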
@@ -3,6 +3,7 @@ config DRM_PL111
 	tristate "DRM Support for PL111 CLCD Controller"
 	depends on DRM
 	depends on ARM || ARM64 || COMPILE_TEST
+	depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
 	depends on COMMON_CLK
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER

@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		int rel;
 
 		rcu_read_lock();
-		fobj = rcu_dereference(bo->tbo.base.resv->fence);
+		fobj = dma_resv_shared_list(bo->tbo.base.resv);
 		rel = fobj ? fobj->shared_count : 0;
 		rcu_read_unlock();
 

@@ -292,12 +292,12 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
 			unsigned long offset)
 {
 	struct qxl_memslot *slot =
-		(bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		(bo->tbo.resource->mem_type == TTM_PL_VRAM)
 		? &qdev->main_slot : &qdev->surfaces_slot;
 
-       /* TODO - need to hold one of the locks to read bo->tbo.mem.start */
+       /* TODO - need to hold one of the locks to read bo->tbo.resource->start */
 
-	return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset);
+	return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);
 }
 
 /* qxl_display.c */

@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 	surf.height = args->height;
 	surf.stride = pitch;
 	surf.format = format;
+	surf.data = 0;
+
 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
 					      QXL_GEM_DOMAIN_CPU,
 					      args->size, &surf, &qobj,

@@ -212,14 +212,14 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 	struct io_mapping *map;
 	struct dma_buf_map bo_map;
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
+	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
 
-	offset = bo->tbo.mem.start << PAGE_SHIFT;
+	offset = bo->tbo.resource->start << PAGE_SHIFT;
 	return io_mapping_map_atomic_wc(map, offset + page_offset);
 fallback:
 	if (bo->kptr) {

@@ -266,8 +266,8 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 			       struct qxl_bo *bo, void *pmap)
 {
-	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
-	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
+	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
+	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
 		goto fallback;
 
 	io_mapping_unmap_atomic(pmap);

@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"

@@ -131,7 +132,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 	qbo = to_qxl_bo(bo);
 	qdev = to_qxl(qbo->tbo.base.dev);
 
-	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+	if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
 		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 

@@ -140,7 +141,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
 		       struct ttm_resource *new_mem,
 		       struct ttm_place *hop)
 {
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 	int ret;
 
 	qxl_bo_move_notify(bo, new_mem);
					@ -400,8 +400,8 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
 | 
				
			||||||
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 | 
						struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Sort A before B if A is smaller. */
 | 
						/* Sort A before B if A is smaller. */
 | 
				
			||||||
	return (int)la->robj->tbo.mem.num_pages -
 | 
						return (int)la->robj->tbo.resource->num_pages -
 | 
				
			||||||
		(int)lb->robj->tbo.mem.num_pages;
 | 
							(int)lb->robj->tbo.resource->num_pages;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/**
 | 
					/**
 | 
				
			||||||
| 
						 | 
					@ -516,7 +516,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
 | 
						r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
 | 
				
			||||||
				&rdev->ring_tmp_bo.bo->tbo.mem);
 | 
									rdev->ring_tmp_bo.bo->tbo.resource);
 | 
				
			||||||
	if (r)
 | 
						if (r)
 | 
				
			||||||
		return r;
 | 
							return r;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -530,7 +530,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 | 
				
			||||||
			return -EINVAL;
 | 
								return -EINVAL;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 | 
							r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
 | 
				
			||||||
		if (r)
 | 
							if (r)
 | 
				
			||||||
			return r;
 | 
								return r;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 | 
				
			||||||
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 | 
							DRM_ERROR("failed to pin new rbo buffer before flip\n");
 | 
				
			||||||
		goto cleanup;
 | 
							goto cleanup;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
 | 
						work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
 | 
				
			||||||
	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 | 
						radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 | 
				
			||||||
	radeon_bo_unreserve(new_rbo);
 | 
						radeon_bo_unreserve(new_rbo);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -523,13 +523,13 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
 		r = 0;
 
-	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_put(gobj);
 	return r;
@@ -552,14 +552,14 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
 		r = ret;
 
 	/* Flush HDP cache via MMIO if necessary */
-	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
 	if (rdev->asic->mmio_hdp_flush &&
 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 		robj->rdev->asic->mmio_hdp_flush(rdev);
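The GEM ioctl hunks above (radeon_gem.c) apply the same rename: dma_resv_wait_timeout_rcu() and dma_resv_test_signaled_rcu() drop their _rcu suffix with unchanged arguments and return conventions (the wait returns a negative error, 0 on timeout, or the remaining jiffies). A sketch of the wait idiom used here, assuming a 30 second budget:

    long ret;
    int r = 0;

    /* wait for all fences (true), interruptibly (true), up to 30s */
    ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
    if (ret == 0)
    	r = -EBUSY;	/* still busy after the timeout */
    else if (ret < 0)
    	r = ret;	/* interrupted or failed */
    /* ret > 0: object is idle, r stays 0 */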
@@ -643,7 +643,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 		goto error_free;
 
 	list_for_each_entry(entry, &list, head) {
-		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
 		/* if anything is swapped out don't swap it in here,
 		   just abort and wait for the next CS */
 		if (domain == RADEON_GEM_DOMAIN_CPU)
@@ -656,7 +656,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 		goto error_unlock;
 
 	if (bo_va->it.start)
-		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
 
 error_unlock:
 	mutex_unlock(&bo_va->vm->mutex);
@@ -860,7 +860,7 @@ static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
 		unsigned domain;
 		const char *placement;
 
-		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
 		switch (domain) {
 		case RADEON_GEM_DOMAIN_VRAM:
 			placement = "VRAM";
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
@@ -76,7 +76,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	bo = container_of(tbo, struct radeon_bo, tbo);
 
-	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+	radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
 
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);
@@ -250,7 +250,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -359,7 +359,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
 {
 	ttm_bo_unpin(&bo->tbo);
 	if (!bo->tbo.pin_count) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
 		else
 			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
@@ -506,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			u32 domain = lobj->preferred_domains;
 			u32 allowed = lobj->allowed_domains;
 			u32 current_domain =
-				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
 
 			/* Check if this buffer will be moved and don't move it
 			 * if we have moved too many buffers for this IB already.
@@ -605,7 +605,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.start << PAGE_SHIFT,
+			       bo->tbo.resource->start << PAGE_SHIFT,
 			       bo->tbo.base.size);
 	return 0;
 }
@@ -711,7 +711,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 		return 0;
 	}
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
@@ -743,7 +743,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	if (!new_mem)
 		return;
 
-	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+	radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
 	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
@@ -760,11 +760,11 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	size = bo->mem.num_pages << PAGE_SHIFT;
-	offset = bo->mem.start << PAGE_SHIFT;
+	size = bo->resource->num_pages << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + size) <= rdev->mc.visible_vram_size)
 		return 0;
 
@@ -786,7 +786,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	} else if (likely(!r)) {
-		offset = bo->mem.start << PAGE_SHIFT;
+		offset = bo->resource->start << PAGE_SHIFT;
 		/* this should never happen */
 		if ((offset + size) > rdev->mc.visible_vram_size)
 			return VM_FAULT_SIGBUS;
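The radeon_bo_fault_reserve_notify() hunks are the one place in this file (radeon_object.c) where the conversion touches address arithmetic rather than a bare field access: ttm_resource keeps num_pages and start in units of pages, so byte extents are derived with PAGE_SHIFT. A sketch of the CPU-visibility check under that assumption, using the names from the hunk above:

    u64 size, offset;

    size   = bo->resource->num_pages << PAGE_SHIFT;	/* extent in bytes */
    offset = bo->resource->start << PAGE_SHIFT;	/* byte offset within VRAM */

    /* BO lies entirely inside the CPU-visible part of VRAM: nothing to do */
    if (offset + size <= rdev->mc.visible_vram_size)
    	return 0;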
@@ -95,7 +95,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 
 	rdev = radeon_get_rdev(bo->tbo.bdev);
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		start = rdev->mc.gtt_start;
 		break;
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 		break;
 	}
 
-	return (bo->tbo.mem.start << PAGE_SHIFT) + start;
+	return (bo->tbo.resource->start << PAGE_SHIFT) + start;
 }
 
 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
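radeon_bo_gpu_offset() (radeon_object.h) shows the addressing model in miniature: the resource's start is a page offset within its memory type, and the GPU address adds the aperture base for that type. A condensed sketch; the TTM_PL_VRAM arm falls between the two hunks and is not shown above, so its use of rdev->mc.vram_start here is an assumption:

    u64 start = 0;

    switch (bo->tbo.resource->mem_type) {
    case TTM_PL_TT:
    	start = rdev->mc.gtt_start;	/* GART aperture base */
    	break;
    case TTM_PL_VRAM:
    	start = rdev->mc.vram_start;	/* VRAM aperture base (assumed arm) */
    	break;
    }
    return (bo->tbo.resource->start << PAGE_SHIFT) + start;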
@@ -154,7 +154,7 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
 		return;
 
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 			ttm_bo_unmap_virtual(&bo->tbo);
 	}
 }
@@ -98,14 +98,14 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	int r = 0;
 
 	/* always sync to the exclusive fence */
-	f = dma_resv_get_excl(resv);
+	f = dma_resv_excl_fence(resv);
 	fence = f ? to_radeon_fence(f) : NULL;
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
 	else if (f)
 		r = dma_fence_wait(f, true);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared_list(resv);
 	if (shared || !flist || r)
 		return r;
 
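The last rename covers the shared side: dma_resv_get_list() becomes dma_resv_shared_list() (radeon_sync.c). The returned struct dma_resv_list is walked as before; a sketch of the loop that follows this hunk in radeon_sync_resv(), assuming the reservation object is held so rcu_dereference_protected() is legal:

    struct dma_resv_list *flist;
    unsigned int i;

    flist = dma_resv_shared_list(resv);
    for (i = 0; flist && i < flist->shared_count; ++i) {
    	struct dma_fence *f;

    	f = rcu_dereference_protected(flist->shared[i],
    				      dma_resv_held(resv));
    	/* ... sync to f if it is ours, else dma_fence_wait(f, true) ... */
    }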
Some files were not shown because too many files have changed in this diff.