	accel/qaic: Synchronize access to DBC request queue head & tail pointer
Two threads of the same process can potentially read from and write to the
head and tail pointers of the same DBC request queue in parallel. This can
lead to a race condition and corrupt the DBC request queue.
Fixes: ff13be8303 ("accel/qaic: Add datapath")
Signed-off-by: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
Signed-off-by: Youssef Samir <youssef.abdulrahman@oss.qualcomm.com>
Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Reviewed-by: Carl Vanderlip <carl.vanderlip@oss.qualcomm.com>
[jhugo: Add fixes tag]
Signed-off-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Link: https://lore.kernel.org/r/20251007061837.206132-1-youssef.abdulrahman@oss.qualcomm.com
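
To make the race concrete: the user-space sketch below is illustrative
only, not driver code. fake_tail stands in for the tail pointer register
at REQTP_OFF, and all names here are made up. Two threads perform the same
unserialized read-modify-write on a shared tail index; holding a mutex
across the read and the write, as this patch does with req_lock, keeps the
updates consistent. Build with: gcc -pthread sketch.c

/* Illustrative user-space model of the race fixed by this patch. */
#include <pthread.h>
#include <stdio.h>

#define NELEM	256
#define SUBMITS	100000u

static unsigned int fake_tail;	/* stands in for the tail at REQTP_OFF */
static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

static void *submit(void *arg)
{
	int locked = *(int *)arg;
	unsigned int i, tail;

	for (i = 0; i < SUBMITS; i++) {
		if (locked)
			pthread_mutex_lock(&req_lock);

		/*
		 * The read-modify-write the driver does on the request
		 * queue tail: read it, queue one request, write it back.
		 * Unlocked, two threads can read the same value, fill the
		 * same slot, and lose one of the two updates.
		 */
		tail = fake_tail;		/* readl(REQTP_OFF)  */
		tail = (tail + 1) % NELEM;	/* reserve one slot  */
		fake_tail = tail;		/* writel(REQTP_OFF) */

		if (locked)
			pthread_mutex_unlock(&req_lock);
	}
	return NULL;
}

int main(void)
{
	int locked;

	for (locked = 0; locked <= 1; locked++) {
		pthread_t a, b;

		fake_tail = 0;
		pthread_create(&a, NULL, submit, &locked);
		pthread_create(&b, NULL, submit, &locked);
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		/*
		 * With the lock the final tail is always
		 * (2 * SUBMITS) % NELEM; without it, lost updates will
		 * usually leave it short of that.
		 */
		printf("%s: tail=%u expected=%u\n",
		       locked ? "locked" : "unlocked",
		       fake_tail, (2 * SUBMITS) % NELEM);
	}
	return 0;
}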
			
			
Parent: 11f08c30a3
Commit: 52e59f7740

3 changed files with 15 additions and 2 deletions
drivers/accel/qaic/qaic.h

@@ -97,6 +97,8 @@ struct dma_bridge_chan {
 	 * response queue's head and tail pointer of this DBC.
 	 */
 	void __iomem		*dbc_base;
+	/* Synchronizes access to Request queue's head and tail pointer */
+	struct mutex		req_lock;
 	/* Head of list where each node is a memory handle queued in request queue */
 	struct list_head	xfer_list;
 	/* Synchronizes DBC readers during cleanup */
drivers/accel/qaic/qaic_data.c

@@ -1356,13 +1356,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
 		goto release_ch_rcu;
 	}
 
+	ret = mutex_lock_interruptible(&dbc->req_lock);
+	if (ret)
+		goto release_ch_rcu;
+
 	head = readl(dbc->dbc_base + REQHP_OFF);
 	tail = readl(dbc->dbc_base + REQTP_OFF);
 
 	if (head == U32_MAX || tail == U32_MAX) {
 		/* PCI link error */
 		ret = -ENODEV;
-		goto release_ch_rcu;
+		goto unlock_req_lock;
 	}
 
 	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
@@ -1370,11 +1374,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
 	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
 				     head, &tail);
 	if (ret)
-		goto release_ch_rcu;
+		goto unlock_req_lock;
 
 	/* Finalize commit to hardware */
 	submit_ts = ktime_get_ns();
 	writel(tail, dbc->dbc_base + REQTP_OFF);
+	mutex_unlock(&dbc->req_lock);
 
 	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
 			      submit_ts, queue_level);
@@ -1382,6 +1387,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
 	if (datapath_polling)
 		schedule_work(&dbc->poll_work);
 
+unlock_req_lock:
+	if (ret)
+		mutex_unlock(&dbc->req_lock);
 release_ch_rcu:
 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
 unlock_dev_srcu:
drivers/accel/qaic/qaic_drv.c

@@ -454,6 +454,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev,
 			return NULL;
 		init_waitqueue_head(&qdev->dbc[i].dbc_release);
 		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+		ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
+		if (ret)
+			return NULL;
 	}
 
 	return qdev;
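
A side note on the last hunk: drmm_mutex_init() is the drm-managed form of
mutex_init(); it ties mutex_destroy() to the lifetime of the drm_device,
which is why create_qdev() gains no matching cleanup for req_lock. A
minimal sketch of the pattern, using placeholder names (example_chan,
example_init) rather than the driver's real types:

#include <drm/drm_managed.h>
#include <linux/mutex.h>

struct example_chan {
	struct mutex req_lock;	/* guards request queue head/tail */
};

/*
 * Returns 0 or a negative errno; the mutex is destroyed automatically
 * when the drm_device is released, so no explicit teardown is needed.
 */
static int example_init(struct drm_device *drm, struct example_chan *chan)
{
	return drmm_mutex_init(drm, &chan->req_lock);
}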