mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	net/tls: don't copy negative amounts of data in reencrypt
There is no guarantee the record starts before the skb frags.
If we don't check for this condition, the copy amount will become
negative, leading to reads and writes to random memory locations.
Familiar hilarity ensues.
Fixes: 4799ac81e5 ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
This commit is contained in:
		
							parent
							
								
									b2a20fd072
								
							
						
					
					
						commit
						97e1caa517
					
				
					 1 changed file with 8 additions and 6 deletions
				
			
		| 
						 | 
					@ -628,6 +628,7 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 | 
				
			||||||
	else
 | 
						else
 | 
				
			||||||
		err = 0;
 | 
							err = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (skb_pagelen(skb) > offset) {
 | 
				
			||||||
		copy = min_t(int, skb_pagelen(skb) - offset,
 | 
							copy = min_t(int, skb_pagelen(skb) - offset,
 | 
				
			||||||
			     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
 | 
								     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -636,6 +637,7 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		offset += copy;
 | 
							offset += copy;
 | 
				
			||||||
		buf += copy;
 | 
							buf += copy;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	skb_walk_frags(skb, skb_iter) {
 | 
						skb_walk_frags(skb, skb_iter) {
 | 
				
			||||||
		copy = min_t(int, skb_iter->len,
 | 
							copy = min_t(int, skb_iter->len,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue