kexec.c (commit a210fd32a4, 311 lines, 7.8 KiB, C)

In order for LSMs and IMA-appraisal to differentiate between kexec_load
and kexec_file_load syscalls, both the original and new syscalls must
call an LSM hook.  This patch adds a call to security_kernel_load_data()
in the original kexec_load syscall.

Signed-off-by: Mimi Zohar <zohar@linux.vnet.ibm.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Kees Cook <keescook@chromium.org>
Acked-by: Serge Hallyn <serge@hallyn.com>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: James Morris <james.morris@microsoft.com>
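For context, security_kernel_load_data() dispatches to the new kernel_load_data LSM hook (and, in this series, to IMA). A minimal, hypothetical sketch of an LSM using the hook follows; the example_* names are invented for illustration, and a real module would register the hook list via security_add_hooks():

#include <linux/lsm_hooks.h>
#include <linux/security.h>

/*
 * Hypothetical example, not part of kexec.c: fail the unauthenticated
 * kexec_load path while leaving kexec_file_load, whose image can be
 * signature-verified, to other policy.
 */
static int example_kernel_load_data(enum kernel_load_data_id id)
{
	if (id == LOADING_KEXEC_IMAGE)
		return -EPERM;	/* callers must use kexec_file_load */
	return 0;
}

static struct security_hook_list example_hooks[] = {
	LSM_HOOK_INIT(kernel_load_data, example_kernel_load_data),
};

This is exactly the differentiation the commit message describes: kexec_load is visible to LSMs through this hook, while kexec_file_load is covered by the kernel_read_file hooks.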
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall image */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(); we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	kimage_free(image);
	return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces:
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine-specific part that includes the syscall number and
 *   then copies the image to its final destination and jumps into
 *   the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need that
 * to happen you must do it yourself.
 */


static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Permit LSMs and IMA to fail the kexec */
	result = security_kernel_load_data(LOADING_KEXEC_IMAGE);
	if (result < 0)
		return result;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;
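	/*
	 * Note on the check above: KEXEC_ARCH_MASK selects the
	 * architecture field of @flags, so the test rejects any set
	 * bit outside KEXEC_ARCH_MASK that is not a known flag in
	 * KEXEC_FLAGS; e.g. KEXEC_ON_CRASH passes, while an undefined
	 * low bit fails with -EINVAL.
	 */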

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, segments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
#endif
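
To round out the syscall description above, a minimal userspace sketch (an illustration, not shipped with the kernel): glibc provides no kexec_load() wrapper, so the raw syscall is used, and per do_kexec_load() above, passing nr_segments == 0 uninstalls any currently loaded image. CAP_SYS_BOOT is required, as kexec_load_check() enforces.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

/* Unload any currently loaded kexec image by passing zero segments. */
int main(void)
{
	long ret = syscall(SYS_kexec_load, 0UL, 0UL, (void *)0,
			   (unsigned long)KEXEC_ARCH_DEFAULT);

	if (ret != 0) {
		perror("kexec_load");	/* EPERM without CAP_SYS_BOOT */
		return 1;
	}
	return 0;
}

Loading an actual image would instead pass the image's entry point and an array of up to KEXEC_SEGMENT_MAX struct kexec_segment entries describing where each piece of the new kernel should be placed.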