mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 16:48:26 +02:00 
			
		
		
		
	padata: avoid UAF for reorder_work
Although the previous patch can avoid the 'ps' and 'pd' UAF for _do_serial, it
cannot avoid a potential UAF issue for reorder_work. This issue can
happen as shown below:
crypto_request			crypto_request		crypto_del_alg
padata_do_serial
  ...
  padata_reorder
    // processes all remaining
    // requests then breaks
    while (1) {
      if (!padata)
        break;
      ...
    }
				padata_do_serial
				  // new request added
				  list_add
    // sees the new request
    queue_work(reorder_work)
				  padata_reorder
				    queue_work_on(squeue->work)
...
				<kworker context>
				padata_serial_worker
				// completes new request,
				// no more outstanding
				// requests
							crypto_del_alg
							  // free pd
<kworker context>
invoke_padata_reorder
  // UAF of pd
To avoid a UAF of 'reorder_work', take a 'pd' reference before putting
'reorder_work' into the 'serial_wq', and drop the 'pd' reference after the
'serial_wq' work finishes.
Fixes: bbefa1dd6a ("crypto: pcrypt - Avoid deadlock by using per-instance padata queues")
Signed-off-by: Chen Ridong <chenridong@huawei.com>
Acked-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
			
			
This commit is contained in:
		
							parent
							
								
									e01780ea46
								
							
						
					
					
						commit
						dd7d37ccf6
					
				
					 1 changed files with 9 additions and 1 deletions
				
			
		|  | @ -352,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd) | ||||||
| 	smp_mb(); | 	smp_mb(); | ||||||
| 
 | 
 | ||||||
| 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); | 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); | ||||||
| 	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) | 	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) { | ||||||
|  | 		/*
 | ||||||
|  | 		 * Other context(eg. the padata_serial_worker) can finish the request. | ||||||
|  | 		 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish. | ||||||
|  | 		 */ | ||||||
|  | 		padata_get_pd(pd); | ||||||
| 		queue_work(pinst->serial_wq, &pd->reorder_work); | 		queue_work(pinst->serial_wq, &pd->reorder_work); | ||||||
|  | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void invoke_padata_reorder(struct work_struct *work) | static void invoke_padata_reorder(struct work_struct *work) | ||||||
|  | @ -364,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work) | ||||||
| 	pd = container_of(work, struct parallel_data, reorder_work); | 	pd = container_of(work, struct parallel_data, reorder_work); | ||||||
| 	padata_reorder(pd); | 	padata_reorder(pd); | ||||||
| 	local_bh_enable(); | 	local_bh_enable(); | ||||||
|  | 	/* Pairs with putting the reorder_work in the serial_wq */ | ||||||
|  | 	padata_put_pd(pd); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void padata_serial_worker(struct work_struct *serial_work) | static void padata_serial_worker(struct work_struct *serial_work) | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Chen Ridong
						Chen Ridong