mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 02:30:34 +02:00

exit: Put an upper limit on how often we can oops

Many Linux systems are configured to not panic on oops; but allowing an
attacker to oops the system **really** often can make even bugs that look
completely unexploitable exploitable (like NULL dereferences and such) if
each crash elevates a refcount by one or a lock is taken in read mode, and
this causes a counter to eventually overflow.

The most interesting counters for this are 32 bits wide (like open-coded
refcounts that don't use refcount_t). (The ldsem reader count on 32-bit
platforms is just 16 bits, but probably nobody cares about 32-bit platforms
that much nowadays.)

So let's panic the system if the kernel is constantly oopsing.

The speed of oopsing 2^32 times probably depends on several factors, like
how long the stack trace is and which unwinder you're using; an empirically
important one is whether your console is showing a graphical environment or
a text console that oopses will be printed to. In a quick single-threaded
benchmark, it looks like oopsing in a vfork() child with a very short stack
trace only takes ~510 microseconds per run when a graphical console is
active; but switching to a text console that oopses are printed to slows it
down around 87x, to ~45 milliseconds per run. (Adding more threads makes
this faster, but the actual oops printing happens under &die_lock on x86,
so you can maybe speed this up by a factor of around 2 and then any further
improvement gets eaten up by lock contention.)

It looks like it would take around 8-12 days to overflow a 32-bit counter
with repeated oopsing on a multi-core X86 system running a graphical
environment; both me (in an X86 VM) and Seth (with a distro kernel on
normal hardware in a standard configuration) got numbers in that ballpark.

12 days aren't *that* short on a desktop system, and you'd likely need much
longer on a typical server system (assuming that people don't run graphical
desktop environments on their servers), and this is a *very* noisy and
violent approach to exploiting the kernel; and it also seems to take orders
of magnitude longer on some machines, probably because stuff like EFI
pstore will slow it down a ton if that's active.

Signed-off-by: Jann Horn <jannh@google.com>
Link: https://lore.kernel.org/r/20221107201317.324457-1-jannh@google.com
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20221117234328.594699-2-keescook@chromium.org

parent 9360d035a5
commit d4ccd54d28
2 changed files with 50 additions and 0 deletions

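To make the overflow mechanism described in the commit message concrete, here is a minimal userspace C sketch (not part of the patch; the variable names are illustrative only): if each oops leaks one reference, a plain 32-bit counter eventually wraps to a value that no longer reflects the live references, while a saturating scheme such as refcount_t clamps at a ceiling instead.

/* Illustrative userspace sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t open_coded_refs = 1;	/* object starts with one live reference */

	/* Simulate 2^32 - 1 leaked "get"s in a single step: the counter now
	 * reads 0, the value that normally means "no references left", even
	 * though the original reference is still live. A later free based on
	 * this count would leave a dangling pointer behind. */
	open_coded_refs += UINT32_MAX;
	printf("open-coded counter after wrap: %u\n", open_coded_refs);

	/* A saturating counter (the refcount_t approach) clamps at a ceiling
	 * instead of wrapping, so the leak stays a leak and nothing more. */
	uint64_t saturating = (uint64_t)1 + UINT32_MAX;
	if (saturating > UINT32_MAX)
		saturating = UINT32_MAX;
	printf("saturating counter: %llu\n", (unsigned long long)saturating);
	return 0;
}

This is why the patch caps the number of oopses globally instead of trying to find and convert every open-coded counter.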
				
			
Documentation/admin-guide/sysctl/kernel.rst

@@ -667,6 +667,14 @@ This is the default behavior.
 an oops event is detected.
 
+oops_limit
+==========
+
+Number of kernel oopses after which the kernel should panic when
+``panic_on_oops`` is not set. Setting this to 0 or 1 has the same effect
+as setting ``panic_on_oops=1``.
+
+
 osrelease, ostype & version
 ===========================
 
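As a quick usage sketch (assuming a kernel that carries this patch and the standard /proc/sys layout), the limit can be read programmatically as shown below; writing a new value works the same way as root, since the entry is created with mode 0644.

/* Usage sketch: read kernel.oops_limit via procfs on a patched kernel. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/oops_limit", "r");
	unsigned int limit;

	if (!f) {
		perror("open /proc/sys/kernel/oops_limit");
		return 1;	/* kernel probably predates this patch */
	}
	if (fscanf(f, "%u", &limit) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected format\n");
		return 1;
	}
	fclose(f);
	printf("kernel.oops_limit = %u\n", limit);
	return 0;
}
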
kernel/exit.c

@@ -72,6 +72,33 @@
 #include <asm/unistd.h>
 #include <asm/mmu_context.h>
 
+/*
+ * The default value should be high enough to not crash a system that randomly
+ * crashes its kernel from time to time, but low enough to at least not permit
+ * overflowing 32-bit refcounts or the ldsem writer count.
+ */
+static unsigned int oops_limit = 10000;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_exit_table[] = {
+	{
+		.procname       = "oops_limit",
+		.data           = &oops_limit,
+		.maxlen         = sizeof(oops_limit),
+		.mode           = 0644,
+		.proc_handler   = proc_douintvec,
+	},
+	{ }
+};
+
+static __init int kernel_exit_sysctls_init(void)
+{
+	register_sysctl_init("kernel", kern_exit_table);
+	return 0;
+}
+late_initcall(kernel_exit_sysctls_init);
+#endif
+
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
 	nr_threads--;

@@ -874,6 +901,8 @@ void __noreturn do_exit(long code)
 
 void __noreturn make_task_dead(int signr)
 {
+	static atomic_t oops_count = ATOMIC_INIT(0);
+
 	/*
 	 * Take the task off the cpu after something catastrophic has
 	 * happened.

@@ -897,6 +926,19 @@ void __noreturn make_task_dead(int signr)
 		preempt_count_set(PREEMPT_ENABLED);
 	}
 
+	/*
+	 * Every time the system oopses, if the oops happens while a reference
+	 * to an object was held, the reference leaks.
+	 * If the oops doesn't also leak memory, repeated oopsing can cause
+	 * reference counters to wrap around (if they're not using refcount_t).
+	 * This means that repeated oopsing can make unexploitable-looking bugs
+	 * exploitable through repeated oopsing.
+	 * To make sure this can't happen, place an upper bound on how often the
+	 * kernel may oops without panic().
+	 */
+	if (atomic_inc_return(&oops_count) >= READ_ONCE(oops_limit))
+		panic("Oopsed too often (kernel.oops_limit is %d)", oops_limit);
+
 	/*
 	 * We're taking recursive faults here in make_task_dead. Safest is to just
 	 * leave this task alone and wait for reboot.
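
The check added to make_task_dead() follows a simple pattern: bump a shared atomic counter on every oops and escalate to panic() once the configurable limit is crossed. A rough userspace analogue using C11 atomics (my own names, abort() standing in for panic()) looks like this:

/* Userspace analogue of the bounded oops counter, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint oops_count;			/* mirrors the static atomic_t */
static unsigned int oops_limit = 10000;		/* mirrors the kernel default */

static void handle_oops(void)
{
	/* atomic_fetch_add() returns the previous value, so add 1 to get the
	 * post-increment count, like the kernel's atomic_inc_return(). The
	 * kernel reads oops_limit with READ_ONCE() because a sysctl write can
	 * race with the check; a plain read suffices in this single-threaded
	 * demo. */
	if (atomic_fetch_add(&oops_count, 1) + 1 >= oops_limit) {
		fprintf(stderr, "Oopsed too often (limit is %u)\n", oops_limit);
		abort();	/* stands in for panic() */
	}
}

int main(void)
{
	for (unsigned int i = 0; i < 20000; i++)
		handle_oops();	/* aborts on the 10000th call */
	return 0;
}

Because the counter only ever grows and the limit defaults to 10000, a handful of genuine, unrelated oopses over a machine's uptime will not trip the panic, but an attacker grinding out crashes by the thousands will hit it long before any 32-bit counter can wrap.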