Merge tag 'mm-nonmm-stable-2025-03-30-18-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull non-MM updates from Andrew Morton:

 - The 7 patch series "powerpc/crash: use generic crashkernel
   reservation" from Sourabh Jain changes powerpc's kexec code to use
   more of the generic layers.

 - The 2 patch series "get_maintainer: report subsystem status
   separately" from Vlastimil Babka makes some long-requested
   improvements to the get_maintainer output.

 - The 4 patch series "ucount: Simplify refcounting with rcuref_t" from
   Sebastian Siewior cleans up and optimizes the refcounting in the
   ucount code.

 - The 12 patch series "reboot: support runtime configuration of
   emergency hw_protection action" from Ahmad Fatoum improves the
   ability for a driver to perform an emergency system shutdown or
   reboot.

 - The 16 patch series "Converge on using secs_to_jiffies() part two"
   from Easwar Hariharan performs further migrations from
   msecs_to_jiffies() to secs_to_jiffies().

 - The 7 patch series "lib/interval_tree: add some test cases and
   cleanup" from Wei Yang permits more userspace testing of kernel
   library code, adds some more tests and performs some cleanups.

 - The 2 patch series "hung_task: Dump the blocking task stacktrace"
   from Masami Hiramatsu arranges for the hung_task detector to dump
   the stack of the blocking task and not just that of the blocked
   task.

 - The 4 patch series "resource: Split and use DEFINE_RES*() macros"
   from Andy Shevchenko provides some cleanups to the resource
   definition macros.

 - Plus the usual shower of singleton patches - please see the
   individual changelogs for details.

* tag 'mm-nonmm-stable-2025-03-30-18-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (77 commits)
  mailmap: consolidate email addresses of Alexander Sverdlin
  fs/procfs: fix the comment above proc_pid_wchan()
  relay: use kasprintf() instead of fixed buffer formatting
  resource: replace open coded variant of DEFINE_RES()
  resource: replace open coded variants of DEFINE_RES_*_NAMED()
  resource: replace open coded variant of DEFINE_RES_NAMED_DESC()
  resource: split DEFINE_RES_NAMED_DESC() out of DEFINE_RES_NAMED()
  samples: add hung_task detector mutex blocking sample
  hung_task: show the blocker task if the task is hung on mutex
  kexec_core: accept unaccepted kexec segments' destination addresses
  watchdog/perf: optimize bytes copied and remove manual NUL-termination
  lib/interval_tree: fix the comment of interval_tree_span_iter_next_gap()
  lib/interval_tree: skip the check before go to the right subtree
  lib/interval_tree: add test case for span iteration
  lib/interval_tree: add test case for interval_tree_iter_xxx() helpers
  lib/rbtree: add random seed
  lib/rbtree: split tests
  lib/rbtree: enable userland test suite for rbtree related data structure
  checkpatch: describe --min-conf-desc-length
  scripts/gdb/symbols: determine KASLR offset on s390
  ...
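
The secs_to_jiffies() conversions that dominate the driver and filesystem
hunks below all follow one mechanical pattern. A hedged, self-contained
sketch of the identity the series relies on (the helper name is
illustrative, not a kernel symbol):

#include <linux/jiffies.h>

/*
 * For in-range values, secs_to_jiffies(n) is equivalent to
 * msecs_to_jiffies(n * 1000), minus the avoidable multiply and its
 * overflow risk for large n.
 */
static unsigned long my_timeout_jiffies(unsigned int timeout_sec)
{
	/* before the series: msecs_to_jiffies(timeout_sec * 1000) */
	return secs_to_jiffies(timeout_sec);
}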

commit d6b02199cd
107 changed files with 1392 additions and 627 deletions

--- a/.mailmap
+++ b/.mailmap
@@ -31,6 +31,13 @@ Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
 Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
 Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin.ext@nsn.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@gmx.de>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@nokia.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@nsn.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@siemens.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@sysgo.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <subaparts@yandex.ru>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com>
 Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org>
@@ -153,7 +160,6 @@ Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao@amd.com>
 Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao.osdev@gmail.com>
 Carlos Bilbao <carlos.bilbao@kernel.org> <bilbao@vt.edu>
 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
-Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
 Chester Lin <chester62515@gmail.com> <clin@suse.com>
@@ -271,6 +277,7 @@ Hamza Mahfooz <hamzamahfooz@linux.microsoft.com> <hamza.mahfooz@amd.com>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
 Hans Verkuil <hverkuil@xs4all.nl> <hansverk@cisco.com>
 Hans Verkuil <hverkuil@xs4all.nl> <hverkuil-cisco@xs4all.nl>
+Harry Yoo <harry.yoo@oracle.com> <42.hyeyoo@gmail.com>
 Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
 Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
 Heiko Stuebner <heiko@sntech.de> <heiko.stuebner@bqreaders.com>
@@ -305,7 +312,6 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
-Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@parity.io>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
@@ -762,7 +768,6 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
-Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
 Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
 Vishnu Dasa <vishnu.dasa@broadcom.com> <vdasa@vmware.com>
 Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org>

--- a/Documentation/ABI/testing/sysfs-kernel-reboot
+++ b/Documentation/ABI/testing/sysfs-kernel-reboot
@@ -30,3 +30,11 @@ KernelVersion:	5.11
 Contact:	Matteo Croce <mcroce@microsoft.com>
 Description:	Don't wait for any other CPUs on reboot and
 		avoid anything that could hang.
+
+What:		/sys/kernel/reboot/hw_protection
+Date:		April 2025
+KernelVersion:	6.15
+Contact:	Ahmad Fatoum <a.fatoum@pengutronix.de>
+Description:	Hardware protection action taken on critical events like
+		overtemperature or imminent voltage loss.
+		Valid values are: reboot shutdown
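
For context, a minimal userspace sketch of driving the new ABI file
described above (assumes a 6.15+ kernel exposing
/sys/kernel/reboot/hw_protection; error handling kept minimal):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/reboot/hw_protection", "w");

	if (!f) {
		perror("hw_protection");
		return 1;
	}
	/* Valid values per the ABI entry above: "reboot" or "shutdown". */
	fputs("reboot", f);
	fclose(f);
	return 0;
}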

--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1954,6 +1954,12 @@
 			which allow the hypervisor to 'idle' the guest
 			on lock contention.
 
+	hw_protection=	[HW]
+			Format: reboot | shutdown
+
+			Hardware protection action taken on critical events like
+			overtemperature or imminent voltage loss.
+
 	i2c_bus=	[HW]	Override the default board specific I2C bus speed
 				or register an additional I2C bus that is not
 				registered from board initialization code.

--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -82,9 +82,8 @@ patternProperties:
         $ref: /schemas/types.yaml#/definitions/string
         description: |
           The action the OS should perform after the critical temperature is reached.
-          By default the system will shutdown as a safe action to prevent damage
-          to the hardware, if the property is not set.
-          The shutdown action should be always the default and preferred one.
+          If the property is not set, it is up to the system to select the correct
+          action. The recommended and preferred default is shutdown.
           Choose 'reboot' with care, as the hardware may be in thermal stress,
           thus leading to infinite reboots that may cause damage to the hardware.
           Make sure the firmware/bootloader will act as the last resort and take

--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -413,18 +413,21 @@ This function serves as an arbitrator to set the state of a cooling
 device. It sets the cooling device to the deepest cooling state if
 possible.
 
-5. thermal_emergency_poweroff
-=============================
+5. Critical Events
+==================
 
-On an event of critical trip temperature crossing the thermal framework
-shuts down the system by calling hw_protection_shutdown(). The
-hw_protection_shutdown() first attempts to perform an orderly shutdown
-but accepts a delay after which it proceeds doing a forced power-off
-or as last resort an emergency_restart.
+On an event of critical trip temperature crossing, the thermal framework
+will trigger a hardware protection power-off (shutdown) or reboot,
+depending on configuration.
+
+At first, the kernel will attempt an orderly power-off or reboot, but
+accepts a delay after which it proceeds to do a forced power-off or
+reboot, respectively. If this fails, ``emergency_restart()`` is invoked
+as last resort.
 
 The delay should be carefully profiled so as to give adequate time for
-orderly poweroff.
+orderly power-off or reboot.
 
-If the delay is set to 0 emergency poweroff will not be supported. So a
-carefully profiled non-zero positive value is a must for emergency
-poweroff to be triggered.
+If the delay is set to 0, the emergency action will not be supported. So a
+carefully profiled non-zero positive value is a must for the emergency
+action to be triggered.
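
The reworked text above describes the behavior drivers get from the
hw_protection API this merge introduces. A hedged sketch of the call-site
pattern (the handler name and the 5000 ms grace period are illustrative,
not taken from this diff):

#include <linux/reboot.h>

static void board_overtemp_handler(void)
{
	/*
	 * Request the configured emergency action (reboot or shutdown,
	 * per hw_protection= or the sysfs knob), giving the orderly
	 * path a profiled 5000 ms before the forced fallback.
	 */
	hw_protection_trigger("board overtemperature", 5000);
}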

--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -128,6 +128,16 @@ process running on the system, which is named after the process ID (PID).
 The link  'self'  points to  the process reading the file system. Each process
 subdirectory has the entries listed in Table 1-1.
 
+A process can read its own information from /proc/PID/* with no extra
+permissions. When reading /proc/PID/* information for other processes, reading
+process is required to have either CAP_SYS_PTRACE capability with
+PTRACE_MODE_READ access permissions, or, alternatively, CAP_PERFMON
+capability. This applies to all read-only information like `maps`, `environ`,
+`pagemap`, etc. The only exception is `mem` file due to its read-write nature,
+which requires CAP_SYS_PTRACE capabilities with more elevated
+PTRACE_MODE_ATTACH permissions; CAP_PERFMON capability does not grant access
+to /proc/PID/mem for other processes.
+
 Note that an open file descriptor to /proc/<pid> or to any of its
 contained files or subdirectories does not prevent <pid> being reused
 for some other process in the event that <pid> exits. Operations on

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -18802,6 +18802,7 @@ F:	mm/percpu*.c
 
 PER-TASK DELAY ACCOUNTING
 M:	Balbir Singh <bsingharora@gmail.com>
+M:	Yang Yang <yang.yang29@zte.com.cn>
 S:	Maintained
 F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
@@ -22153,7 +22154,7 @@ M:	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Vlastimil Babka <vbabka@suse.cz>
 R:	Roman Gushchin <roman.gushchin@linux.dev>
-R:	Hyeonggon Yoo <42.hyeyoo@gmail.com>
+R:	Harry Yoo <harry.yoo@oracle.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git

--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -98,21 +98,19 @@ static void __init arch_reserve_crashkernel(void)
 {
 	unsigned long long low_size = 0;
 	unsigned long long crash_base, crash_size;
-	char *cmdline = boot_command_line;
 	bool high = false;
 	int ret;
 
 	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
 		return;
 
-	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 				&crash_size, &crash_base,
 				&low_size, &high);
 	if (ret)
 		return;
 
-	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
-				    low_size, high);
+	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
 }
 
 static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
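
The same two-line simplification repeats on LoongArch, RISC-V and x86
below; condensed, the per-arch wrapper now reduces to this shape (a
sketch of the shared pattern, not any one architecture's verbatim code):

static void __init arch_reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size, low_size = 0;
	bool high = false;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	if (parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			      &crash_size, &crash_base, &low_size, &high))
		return;

	/* the cmdline argument was dropped from the generic helper */
	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}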

--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -259,18 +259,17 @@ static void __init arch_reserve_crashkernel(void)
 	int ret;
 	unsigned long long low_size = 0;
 	unsigned long long crash_base, crash_size;
-	char *cmdline = boot_command_line;
 	bool high = false;
 
 	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
 		return;
 
-	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 				&crash_size, &crash_base, &low_size, &high);
 	if (ret)
 		return;
 
-	reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
+	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
 }
 
 static void __init fdt_setup(void)

--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -716,6 +716,9 @@ config ARCH_SUPPORTS_CRASH_HOTPLUG
 	def_bool y
 	depends on PPC64
 
+config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+	def_bool CRASH_RESERVE
+
 config FA_DUMP
 	bool "Firmware-assisted dump"
 	depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)

--- /dev/null
+++ b/arch/powerpc/include/asm/crash_reserve.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CRASH_RESERVE_H
+#define _ASM_POWERPC_CRASH_RESERVE_H
+
+/* crash kernel regions are Page size agliged */
+#define CRASH_ALIGN             PAGE_SIZE
+
+#endif /* _ASM_POWERPC_CRASH_RESERVE_H */

--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,8 +94,10 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long
 int arch_kimage_file_post_load_cleanup(struct kimage *image);
 #define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
 
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
-#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
+int arch_check_excluded_range(struct kimage *image, unsigned long start,
+			      unsigned long end);
+#define arch_check_excluded_range  arch_check_excluded_range
+
 
 int load_crashdump_segments_ppc64(struct kimage *image,
 				  struct kexec_buf *kbuf);
@@ -112,9 +114,9 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem
 
 #ifdef CONFIG_CRASH_RESERVE
 int __init overlaps_crashkernel(unsigned long start, unsigned long size);
-extern void reserve_crashkernel(void);
+extern void arch_reserve_crashkernel(void);
 #else
-static inline void reserve_crashkernel(void) {}
+static inline void arch_reserve_crashkernel(void) {}
 static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
 #endif
 

--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -860,7 +860,7 @@ void __init early_init_devtree(void *params)
 	 */
 	if (fadump_reserve_mem() == 0)
 #endif
-		reserve_crashkernel();
+		arch_reserve_crashkernel();
 	early_reserve_mem();
 
 	if (memory_limit > memblock_phys_mem_size())

--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -58,38 +58,20 @@ void machine_kexec(struct kimage *image)
 }
 
 #ifdef CONFIG_CRASH_RESERVE
-void __init reserve_crashkernel(void)
+
+static unsigned long long __init get_crash_base(unsigned long long crash_base)
 {
-	unsigned long long crash_size, crash_base, total_mem_sz;
-	int ret;
-
-	total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
-	/* use common parsing */
-	ret = parse_crashkernel(boot_command_line, total_mem_sz,
-			&crash_size, &crash_base, NULL, NULL);
-	if (ret == 0 && crash_size > 0) {
-		crashk_res.start = crash_base;
-		crashk_res.end = crash_base + crash_size - 1;
-	}
-
-	if (crashk_res.end == crashk_res.start) {
-		crashk_res.start = crashk_res.end = 0;
-		return;
-	}
-
-	/* We might have got these values via the command line or the
-	 * device tree, either way sanitise them now. */
-
-	crash_size = resource_size(&crashk_res);
 
 #ifndef CONFIG_NONSTATIC_KERNEL
-	if (crashk_res.start != KDUMP_KERNELBASE)
+	if (crash_base != KDUMP_KERNELBASE)
 		printk("Crash kernel location must be 0x%x\n",
 				KDUMP_KERNELBASE);
 
-	crashk_res.start = KDUMP_KERNELBASE;
+	return KDUMP_KERNELBASE;
 #else
-	if (!crashk_res.start) {
+	unsigned long long crash_base_align;
+
+	if (!crash_base) {
 #ifdef CONFIG_PPC64
 		/*
 		 * On the LPAR platform place the crash kernel to mid of
@@ -101,53 +83,51 @@ void __init reserve_crashkernel(void)
 		 * kernel starts at 128MB offset on other platforms.
 		 */
 		if (firmware_has_feature(FW_FEATURE_LPAR))
-			crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M);
+			crash_base = min_t(u64, ppc64_rma_size / 2, SZ_512M);
 		else
-			crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M);
+			crash_base = min_t(u64, ppc64_rma_size / 2, SZ_128M);
 #else
-		crashk_res.start = KDUMP_KERNELBASE;
+		crash_base = KDUMP_KERNELBASE;
 #endif
 	}
 
-	crash_base = PAGE_ALIGN(crashk_res.start);
-	if (crash_base != crashk_res.start) {
-		printk("Crash kernel base must be aligned to 0x%lx\n",
-				PAGE_SIZE);
-		crashk_res.start = crash_base;
-	}
+	crash_base_align = PAGE_ALIGN(crash_base);
+	if (crash_base != crash_base_align)
+		pr_warn("Crash kernel base must be aligned to 0x%lx\n", PAGE_SIZE);
+
+	return crash_base_align;
 #endif
-	crash_size = PAGE_ALIGN(crash_size);
-	crashk_res.end = crashk_res.start + crash_size - 1;
 }
 
+void __init arch_reserve_crashkernel(void)
+{
+	unsigned long long crash_size, crash_base, crash_end;
+	unsigned long long kernel_start, kernel_size;
+	unsigned long long total_mem_sz;
+	int ret;
+
+	total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
+
+	/* use common parsing */
+	ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size,
+				&crash_base, NULL, NULL);
+
+	if (ret)
+		return;
+
+	crash_base = get_crash_base(crash_base);
+	crash_end = crash_base + crash_size - 1;
+
+	kernel_start = __pa(_stext);
+	kernel_size = _end - _stext;
+
 	/* The crash region must not overlap the current kernel */
-	if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
-		printk(KERN_WARNING
-			"Crash kernel can not overlap current kernel\n");
-		crashk_res.start = crashk_res.end = 0;
+	if ((kernel_start + kernel_size > crash_base) && (kernel_start <= crash_end)) {
+		pr_warn("Crash kernel can not overlap current kernel\n");
 		return;
 	}
 
-	/* Crash kernel trumps memory limit */
-	if (memory_limit && memory_limit <= crashk_res.end) {
-		memory_limit = crashk_res.end + 1;
-		total_mem_sz = memory_limit;
-		printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
-		       memory_limit);
-	}
-
-	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
-			"for crashkernel (System RAM: %ldMB)\n",
-			(unsigned long)(crash_size >> 20),
-			(unsigned long)(crashk_res.start >> 20),
-			(unsigned long)(total_mem_sz >> 20));
-
-	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
-	    memblock_reserve(crashk_res.start, crash_size)) {
-		pr_err("Failed to reserve memory for crashkernel!\n");
-		crashk_res.start = crashk_res.end = 0;
-		return;
-	}
+	reserve_crashkernel_generic(crash_size, crash_base, 0, false);
 }
 
 int __init overlaps_crashkernel(unsigned long start, unsigned long size)
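
The new overlap test in arch_reserve_crashkernel() above is a plain
closed-interval intersection check. A standalone illustration with
made-up addresses (userspace, for clarity only):

#include <assert.h>

/* [a0, a1] and [b0, b1] are inclusive ranges */
static int overlaps(unsigned long long a0, unsigned long long a1,
		    unsigned long long b0, unsigned long long b1)
{
	return a0 <= b1 && b0 <= a1;
}

int main(void)
{
	unsigned long long mb = 1ULL << 20;

	/* kernel in [0, 64M), crash region in [128M, 384M): disjoint */
	assert(!overlaps(0, 64 * mb - 1, 128 * mb, 384 * mb - 1));
	/* crash region dropped onto the kernel text: rejected */
	assert(overlaps(0, 64 * mb - 1, 32 * mb, 96 * mb - 1));
	return 0;
}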

--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -49,201 +49,18 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
 	NULL
 };
 
-/**
- * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
- *                              in the memory regions between buf_min & buf_max
- *                              for the buffer. If found, sets kbuf->mem.
- * @kbuf:                       Buffer contents and memory parameters.
- * @buf_min:                    Minimum address for the buffer.
- * @buf_max:                    Maximum address for the buffer.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
-				      u64 buf_min, u64 buf_max)
+int arch_check_excluded_range(struct kimage *image, unsigned long start,
+			      unsigned long end)
 {
-	int ret = -EADDRNOTAVAIL;
-	phys_addr_t start, end;
-	u64 i;
+	struct crash_mem *emem;
+	int i;
 
-	for_each_mem_range_rev(i, &start, &end) {
-		/*
-		 * memblock uses [start, end) convention while it is
-		 * [start, end] here. Fix the off-by-one to have the
-		 * same convention.
-		 */
-		end -= 1;
+	emem = image->arch.exclude_ranges;
+	for (i = 0; i < emem->nr_ranges; i++)
+		if (start < emem->ranges[i].end && end > emem->ranges[i].start)
+			return 1;
 
-		if (start > buf_max)
-			continue;
-
-		/* Memory hole not found */
-		if (end < buf_min)
-			break;
-
-		/* Adjust memory region based on the given range */
-		if (start < buf_min)
-			start = buf_min;
-		if (end > buf_max)
-			end = buf_max;
-
-		start = ALIGN(start, kbuf->buf_align);
-		if (start < end && (end - start + 1) >= kbuf->memsz) {
-			/* Suitable memory range found. Set kbuf->mem */
-			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
-					       kbuf->buf_align);
-			ret = 0;
-			break;
-		}
-	}
-
-	return ret;
-}
-
-/**
- * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
- *                                  suitable buffer with top down approach.
- * @kbuf:                           Buffer contents and memory parameters.
- * @buf_min:                        Minimum address for the buffer.
- * @buf_max:                        Maximum address for the buffer.
- * @emem:                           Exclude memory ranges.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
-					  u64 buf_min, u64 buf_max,
-					  const struct crash_mem *emem)
-{
-	int i, ret = 0, err = -EADDRNOTAVAIL;
-	u64 start, end, tmin, tmax;
-
-	tmax = buf_max;
-	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
-		start = emem->ranges[i].start;
-		end = emem->ranges[i].end;
-
-		if (start > tmax)
-			continue;
-
-		if (end < tmax) {
-			tmin = (end < buf_min ? buf_min : end + 1);
-			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
-			if (!ret)
-				return 0;
-		}
-
-		tmax = start - 1;
-
-		if (tmax < buf_min) {
-			ret = err;
-			break;
-		}
-		ret = 0;
-	}
-
-	if (!ret) {
-		tmin = buf_min;
-		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
-	}
-	return ret;
-}
-
-/**
- * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
- *                               in the memory regions between buf_min & buf_max
- *                               for the buffer. If found, sets kbuf->mem.
- * @kbuf:                        Buffer contents and memory parameters.
- * @buf_min:                     Minimum address for the buffer.
- * @buf_max:                     Maximum address for the buffer.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
-				       u64 buf_min, u64 buf_max)
-{
-	int ret = -EADDRNOTAVAIL;
-	phys_addr_t start, end;
-	u64 i;
-
-	for_each_mem_range(i, &start, &end) {
-		/*
-		 * memblock uses [start, end) convention while it is
-		 * [start, end] here. Fix the off-by-one to have the
-		 * same convention.
-		 */
-		end -= 1;
-
-		if (end < buf_min)
-			continue;
-
-		/* Memory hole not found */
-		if (start > buf_max)
-			break;
-
-		/* Adjust memory region based on the given range */
-		if (start < buf_min)
-			start = buf_min;
-		if (end > buf_max)
-			end = buf_max;
-
-		start = ALIGN(start, kbuf->buf_align);
-		if (start < end && (end - start + 1) >= kbuf->memsz) {
-			/* Suitable memory range found. Set kbuf->mem */
-			kbuf->mem = start;
-			ret = 0;
-			break;
-		}
-	}
-
-	return ret;
-}
-
-/**
- * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
- *                                   suitable buffer with bottom up approach.
- * @kbuf:                            Buffer contents and memory parameters.
- * @buf_min:                         Minimum address for the buffer.
- * @buf_max:                         Maximum address for the buffer.
- * @emem:                            Exclude memory ranges.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
-					   u64 buf_min, u64 buf_max,
-					   const struct crash_mem *emem)
-{
-	int i, ret = 0, err = -EADDRNOTAVAIL;
-	u64 start, end, tmin, tmax;
-
-	tmin = buf_min;
-	for (i = 0; i < emem->nr_ranges; i++) {
-		start = emem->ranges[i].start;
-		end = emem->ranges[i].end;
-
-		if (end < tmin)
-			continue;
-
-		if (start > tmin) {
-			tmax = (start > buf_max ? buf_max : start - 1);
-			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
-			if (!ret)
-				return 0;
-		}
-
-		tmin = end + 1;
-
-		if (tmin > buf_max) {
-			ret = err;
-			break;
-		}
-		ret = 0;
-	}
-
-	if (!ret) {
-		tmax = buf_max;
-		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
-	}
-	return ret;
-}
+	return 0;
+}
 
 #ifdef CONFIG_CRASH_DUMP
@@ -1004,64 +821,6 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem
 	return ret;
 }
 
-/**
- * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
- *                              tce-table, reserved-ranges & such (exclude
- *                              memory ranges) as they can't be used for kexec
- *                              segment buffer. Sets kbuf->mem when a suitable
- *                              memory hole is found.
- * @kbuf:                       Buffer contents and memory parameters.
- *
- * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
- *
- * Returns 0 on success, negative errno on error.
- */
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
-{
-	struct crash_mem **emem;
-	u64 buf_min, buf_max;
-	int ret;
-
-	/* Look up the exclude ranges list while locating the memory hole */
-	emem = &(kbuf->image->arch.exclude_ranges);
-	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
-		pr_warn("No exclude range list. Using the default locate mem hole method\n");
-		return kexec_locate_mem_hole(kbuf);
-	}
-
-	buf_min = kbuf->buf_min;
-	buf_max = kbuf->buf_max;
-	/* Segments for kdump kernel should be within crashkernel region */
-	if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
-		buf_min = (buf_min < crashk_res.start ?
-			   crashk_res.start : buf_min);
-		buf_max = (buf_max > crashk_res.end ?
-			   crashk_res.end : buf_max);
-	}
-
-	if (buf_min > buf_max) {
-		pr_err("Invalid buffer min and/or max values\n");
-		return -EINVAL;
-	}
-
-	if (kbuf->top_down)
-		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
-						     *emem);
-	else
-		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
-						      *emem);
-
-	/* Add the buffer allocated to the exclude list for the next lookup */
-	if (!ret) {
-		add_mem_range(emem, kbuf->mem, kbuf->memsz);
-		sort_memory_ranges(*emem, true);
-	} else {
-		pr_err("Failed to locate memory buffer of size %lu\n",
-		       kbuf->memsz);
-	}
-	return ret;
-}
-
 /**
  * arch_kexec_kernel_image_probe - Does additional handling needed to setup
  *                                 kexec segments.
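
With the bespoke walkers gone, the remaining arch hook is small:
arch_check_excluded_range() returns nonzero when a candidate range
intersects a region the architecture excludes, and (per this series) the
generic kexec buffer placement consults it while scanning for holes. A
hedged sketch of the consumer side (simplified, not the verbatim generic
implementation):

#include <linux/kexec.h>

/* Sketch: reject a candidate [start, end] if the arch excludes any part. */
static bool candidate_usable(struct kimage *image, unsigned long start,
			     unsigned long end)
{
	return arch_check_excluded_range(image, start, end) == 0;
}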
| 
						 | 
				
			
			@ -338,7 +338,7 @@ static int __init add_system_ram_resources(void)
 | 
			
		|||
			 */
 | 
			
		||||
			res->end = end - 1;
 | 
			
		||||
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 | 
			
		||||
			WARN_ON(request_resource(&iomem_resource, res) < 0);
 | 
			
		||||
			WARN_ON(insert_resource(&iomem_resource, res) < 0);
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1393,21 +1393,19 @@ static void __init arch_reserve_crashkernel(void)
 | 
			
		|||
{
 | 
			
		||||
	unsigned long long low_size = 0;
 | 
			
		||||
	unsigned long long crash_base, crash_size;
 | 
			
		||||
	char *cmdline = boot_command_line;
 | 
			
		||||
	bool high = false;
 | 
			
		||||
	int ret;
 | 
			
		||||
 | 
			
		||||
	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
 | 
			
		||||
		return;
 | 
			
		||||
 | 
			
		||||
	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
 | 
			
		||||
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 | 
			
		||||
				&crash_size, &crash_base,
 | 
			
		||||
				&low_size, &high);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		return;
 | 
			
		||||
 | 
			
		||||
	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
 | 
			
		||||
				    low_size, high);
 | 
			
		||||
	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void __init paging_init(void)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -578,14 +578,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 | 
			
		|||
static void __init arch_reserve_crashkernel(void)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long long crash_base, crash_size, low_size = 0;
 | 
			
		||||
	char *cmdline = boot_command_line;
 | 
			
		||||
	bool high = false;
 | 
			
		||||
	int ret;
 | 
			
		||||
 | 
			
		||||
	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
 | 
			
		||||
		return;
 | 
			
		||||
 | 
			
		||||
	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
 | 
			
		||||
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 | 
			
		||||
				&crash_size, &crash_base,
 | 
			
		||||
				&low_size, &high);
 | 
			
		||||
	if (ret)
 | 
			
		||||
| 
						 | 
				
			
			@ -596,8 +595,7 @@ static void __init arch_reserve_crashkernel(void)
 | 
			
		|||
		return;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
 | 
			
		||||
				    low_size, high);
 | 
			
		||||
	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static struct resource standard_io_resources[] = {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2586,7 +2586,7 @@ int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
 | 
			
		|||
		cs_seq = args->in.seq;
 | 
			
		||||
 | 
			
		||||
	timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
 | 
			
		||||
			? msecs_to_jiffies(args->in.timeout * 1000)
 | 
			
		||||
			? secs_to_jiffies(args->in.timeout)
 | 
			
		||||
			: hpriv->hdev->timeout_jiffies;
 | 
			
		||||
 | 
			
		||||
	switch (cs_type) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			

--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -1403,7 +1403,7 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
 		return rc;
 
 	if (value)
-		hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
+		hdev->timeout_jiffies = secs_to_jiffies(value);
 	else
 		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
 

--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -2091,7 +2091,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
 	dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
 		hdev->device_release_watchdog_timeout_sec);
 	schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
-				msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
+				secs_to_jiffies(hdev->device_release_watchdog_timeout_sec));
 	hdev->reset_info.watchdog_active = 1;
 out:
 	spin_unlock(&hdev->reset_info.lock);

--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -386,7 +386,7 @@ static int fixup_device_params(struct hl_device *hdev)
 	hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
 
 	if (tmp_timeout)
-		hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
+		hdev->timeout_jiffies = secs_to_jiffies(tmp_timeout);
 	else
 		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
 

--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -160,8 +160,7 @@ void zpodd_on_suspend(struct ata_device *dev)
 		return;
 	}
 
-	expires = zpodd->last_ready +
-		  msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+	expires = zpodd->last_ready + secs_to_jiffies(zpodd_poweroff_delay);
 	if (time_before(jiffies, expires))
 		return;
 

--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -160,7 +160,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 		wait_event_timeout(cmdq->waitq,
 				   !crsqe->is_in_used ||
 				   test_bit(ERR_DEVICE_DETACHED, &cmdq->flags),
-				   msecs_to_jiffies(rcfw->max_timeout * 1000));
+				   secs_to_jiffies(rcfw->max_timeout));
 
 		if (!crsqe->is_in_used)
 			return 0;

--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4469,11 +4469,9 @@ static void nvme_fw_act_work(struct work_struct *work)
 	nvme_auth_stop(ctrl);
 
 	if (ctrl->mtfa)
-		fw_act_timeout = jiffies +
-				msecs_to_jiffies(ctrl->mtfa * 100);
+		fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100);
 	else
-		fw_act_timeout = jiffies +
-				msecs_to_jiffies(admin_timeout * 1000);
+		fw_act_timeout = jiffies + secs_to_jiffies(admin_timeout);
 
 	nvme_quiesce_io_queues(ctrl);
 	while (nvme_ctrl_pp_status(ctrl)) {

--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -455,7 +455,7 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
 		blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
 		kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
 		/* Begin orderly shutdown. EC will force reset after a short period. */
-		hw_protection_shutdown("CrOS EC Panic", -1);
+		__hw_protection_trigger("CrOS EC Panic", -1, HWPROT_ACT_SHUTDOWN);
 		/* Do not query for other events after a panic is reported */
 		return;
 	}
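
Unlike hw_protection_trigger(), the double-underscore variant pins a
specific action instead of following the hw_protection=/sysfs default;
the call above keeps this driver's prior shutdown semantics. A hedged
sketch of the signature as used here (the wrapper name is illustrative):

#include <linux/reboot.h>

static void ec_panic_path(void)
{
	/*
	 * Negative delay: no forced-fallback timer is scheduled; per the
	 * comment in the driver, the EC itself resets the board shortly.
	 */
	__hw_protection_trigger("CrOS EC Panic", -1, HWPROT_ACT_SHUTDOWN);
}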

--- a/drivers/power/supply/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
@@ -502,8 +502,7 @@ static int da9030_battery_probe(struct platform_device *pdev)
 
 	/* 10 seconds between monitor runs unless platform defines other
 	   interval */
-	charger->interval = msecs_to_jiffies(
-		(pdata->batmon_interval ? : 10) * 1000);
+	charger->interval = secs_to_jiffies(pdata->batmon_interval ? : 10);
 
 	charger->charge_milliamp = pdata->charge_milliamp;
 	charger->charge_millivolt = pdata->charge_millivolt;

--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5282,7 +5282,7 @@ static void regulator_handle_critical(struct regulator_dev *rdev,
 	if (!reason)
 		return;
 
-	hw_protection_shutdown(reason,
+	hw_protection_trigger(reason,
 			      rdev->constraints->uv_less_critical_window_ms);
 }
 

--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -64,7 +64,7 @@ static void regulator_notifier_isr_work(struct work_struct *work)
 reread:
 	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
 		if (!d->die)
-			return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
+			return hw_protection_trigger("Regulator HW failure? - no IC recovery",
 						     REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
 		ret = d->die(rid);
 		/*
@@ -72,7 +72,7 @@ static void regulator_notifier_isr_work(struct work_struct *work)
 		 * nothing else left to do...
 		 */
 		if (ret)
-			return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
+			return hw_protection_trigger("Regulator HW failure. IC recovery failed",
 						     REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
 
 		/*
@@ -263,13 +263,13 @@ static irqreturn_t regulator_notifier_isr(int irq, void *data)
 	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
 		/* If we have no recovery, just try shut down straight away */
 		if (!d->die) {
-			hw_protection_shutdown("Regulator failure. Retry count exceeded",
+			hw_protection_trigger("Regulator failure. Retry count exceeded",
 					      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
 		} else {
 			ret = d->die(rid);
 			/* If die() failed shut down as a last attempt to save the HW */
 			if (ret)
-				hw_protection_shutdown("Regulator failure. Recovery failed",
+				hw_protection_trigger("Regulator failure. Recovery failed",
 						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
 		}
 	}

--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -369,7 +369,8 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz,
 	tz->governor->update_tz(tz, reason);
 }
 
-static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdown)
+static void thermal_zone_device_halt(struct thermal_zone_device *tz,
+				     enum hw_protection_action action)
 {
 	/*
 	 * poweroff_delay_ms must be a carefully profiled positive value.
@@ -380,21 +381,23 @@ static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdo
 
 	dev_emerg(&tz->device, "%s: critical temperature reached\n", tz->type);
 
-	if (shutdown)
-		hw_protection_shutdown(msg, poweroff_delay_ms);
-	else
-		hw_protection_reboot(msg, poweroff_delay_ms);
+	__hw_protection_trigger(msg, poweroff_delay_ms, action);
 }
 
 void thermal_zone_device_critical(struct thermal_zone_device *tz)
 {
-	thermal_zone_device_halt(tz, true);
+	thermal_zone_device_halt(tz, HWPROT_ACT_DEFAULT);
 }
 EXPORT_SYMBOL(thermal_zone_device_critical);
 
+void thermal_zone_device_critical_shutdown(struct thermal_zone_device *tz)
+{
+	thermal_zone_device_halt(tz, HWPROT_ACT_SHUTDOWN);
+}
+
 void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz)
 {
-	thermal_zone_device_halt(tz, false);
+	thermal_zone_device_halt(tz, HWPROT_ACT_REBOOT);
 }
 
 static void handle_critical_trips(struct thermal_zone_device *tz,

--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -262,6 +262,7 @@ int thermal_build_list_of_policies(char *buf);
 void __thermal_zone_device_update(struct thermal_zone_device *tz,
 				  enum thermal_notify_event event);
 void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz);
+void thermal_zone_device_critical_shutdown(struct thermal_zone_device *tz);
 void thermal_governor_update_tz(struct thermal_zone_device *tz,
 				enum thermal_notify_event reason);
 

--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -405,9 +405,12 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
 	of_ops.should_bind = thermal_of_should_bind;
 
 	ret = of_property_read_string(np, "critical-action", &action);
-	if (!ret)
-		if (!of_ops.critical && !strcasecmp(action, "reboot"))
+	if (!ret && !of_ops.critical) {
+		if (!strcasecmp(action, "reboot"))
 			of_ops.critical = thermal_zone_device_critical_reboot;
+		else if (!strcasecmp(action, "shutdown"))
+			of_ops.critical = thermal_zone_device_critical_shutdown;
+	}
 
 	tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
 						     data, &of_ops, &tzp,

--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1564,7 +1564,7 @@ static int transaction_kthread(void *arg)
 
 	do {
 		cannot_commit = false;
-		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
+		delay = secs_to_jiffies(fs_info->commit_interval);
 		mutex_lock(&fs_info->transaction_kthread_mutex);
 
 		spin_lock(&fs_info->trans_lock);
@@ -1579,9 +1579,9 @@ static int transaction_kthread(void *arg)
 		    cur->state < TRANS_STATE_COMMIT_PREP &&
 		    delta < fs_info->commit_interval) {
 			spin_unlock(&fs_info->trans_lock);
-			delay -= msecs_to_jiffies((delta - 1) * 1000);
+			delay -= secs_to_jiffies(delta - 1);
 			delay = min(delay,
-				    msecs_to_jiffies(fs_info->commit_interval * 1000));
+				    secs_to_jiffies(fs_info->commit_interval));
 			goto sleep;
 		}
 		transid = cur->transid;

--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
 
 	el = root_el;
 	while (el->l_tree_depth) {
+		if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) {
+			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+				    "Owner %llu has invalid tree depth %u in extent list\n",
+				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
+				    le16_to_cpu(el->l_tree_depth));
+			ret = -EROFS;
+			goto out;
+		}
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
 				    "Owner %llu has empty extent list at depth %u\n",

--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -46,7 +46,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
 	struct buffer_head *bh = NULL;
 	struct buffer_head *buffer_cache_bh = NULL;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	void *kaddr;
 
 	trace_ocfs2_symlink_get_block(
 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -91,17 +90,11 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
 		 * could've happened. Since we've got a reference on
 		 * the bh, even if it commits while we're doing the
 		 * copy, the data is still good. */
-		if (buffer_jbd(buffer_cache_bh)
-		    && ocfs2_inode_is_new(inode)) {
-			kaddr = kmap_atomic(bh_result->b_page);
-			if (!kaddr) {
-				mlog(ML_ERROR, "couldn't kmap!\n");
-				goto bail;
-			}
-			memcpy(kaddr + (bh_result->b_size * iblock),
+		if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) {
+			memcpy_to_folio(bh_result->b_folio,
+					bh_result->b_size * iblock,
 					buffer_cache_bh->b_data,
 					bh_result->b_size);
-			kunmap_atomic(kaddr);
 			set_buffer_uptodate(bh_result);
 		}
 		brelse(buffer_cache_bh);
@@ -273,7 +273,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
 	if (new)
 		memset(bh->b_data, 0, sb->s_blocksize);
 	memcpy(bh->b_data + offset, data, len);
-	flush_dcache_page(bh->b_page);
+	flush_dcache_folio(bh->b_folio);
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
@@ -416,7 +416,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
 #ifdef CONFIG_KALLSYMS
 /*
  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
- * Returns the resolved symbol.  If that fails, simply return the address.
+ * Returns the resolved symbol to user space.
  */
 static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
 			  struct pid *pid, struct task_struct *task)
@@ -230,7 +230,7 @@ xfs_blockgc_queue(
 	rcu_read_lock();
 	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
 		queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work,
-				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
+				   secs_to_jiffies(xfs_blockgc_secs));
 	rcu_read_unlock();
 }

@@ -569,8 +569,8 @@ retry_timeout_seconds_store(
 	if (val == -1)
 		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
 	else {
-		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
-		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
+		cfg->retry_timeout = secs_to_jiffies(val);
+		ASSERT(secs_to_jiffies(val) < LONG_MAX);
 	}
 	return count;
 }
@@ -687,8 +687,8 @@ xfs_error_sysfs_init_class(
 		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
 			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
 		else
-			cfg->retry_timeout = msecs_to_jiffies(
-					init[i].retry_timeout * MSEC_PER_SEC);
+			cfg->retry_timeout =
+					secs_to_jiffies(init[i].retry_timeout);
 	}
 	return 0;

@@ -148,7 +148,7 @@ static inline int suspend_disable_secondary_cpus(void)
 }
 static inline void suspend_enable_secondary_cpus(void)
 {
-	return thaw_secondary_cpus();
+	thaw_secondary_cpus();
 }

 #else /* !CONFIG_PM_SLEEP_SMP */
@@ -32,13 +32,12 @@ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
 #define CRASH_ADDR_HIGH_MAX		memblock_end_of_DRAM()
 #endif

-void __init reserve_crashkernel_generic(char *cmdline,
-		unsigned long long crash_size,
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
 					unsigned long long crash_base,
 					unsigned long long crash_low_size,
 					bool high);
 #else
-static inline void __init reserve_crashkernel_generic(char *cmdline,
+static inline void __init reserve_crashkernel_generic(
 		unsigned long long crash_size,
 		unsigned long long crash_base,
 		unsigned long long crash_low_size,
@@ -104,13 +104,9 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last)	      \
 		if (ITSTART(node) <= last) {		/* Cond1 */	      \
 			if (start <= ITLAST(node))	/* Cond2 */	      \
 				return node;	/* node is leftmost match */  \
-			if (node->ITRB.rb_right) {			      \
-				node = rb_entry(node->ITRB.rb_right,	      \
-						ITSTRUCT, ITRB);	      \
-				if (start <= node->ITSUBTREE)		      \
-					continue;			      \
-			}						      \
+			node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
+			continue;					      \
 		}							      \
 		return NULL;	/* No match */				      \
 	}								      \
 }									      \
@@ -154,15 +154,20 @@ enum {
 };

 /* helpers to define resources */
-#define DEFINE_RES_NAMED(_start, _size, _name, _flags)			\
+#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc)	\
 (struct resource) {							\
 		.start = (_start),					\
 		.end = (_start) + (_size) - 1,				\
 		.name = (_name),					\
 		.flags = (_flags),					\
-		.desc = IORES_DESC_NONE,				\
+		.desc = (_desc),					\
 	}

+#define DEFINE_RES_NAMED(_start, _size, _name, _flags)			\
+	DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
 #define DEFINE_RES(_start, _size, _flags)				\
 	DEFINE_RES_NAMED(_start, _size, NULL, _flags)

 #define DEFINE_RES_IO_NAMED(_start, _size, _name)			\
 	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
 #define DEFINE_RES_IO(_start, _size)					\
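The new layering is easy to exercise outside the kernel. The sketch below re-declares a cut-down struct resource and the macros from the hunk; the struct fields and flag value are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Cut-down model of the kernel's struct resource and the layered
 * DEFINE_RES_*() helpers from include/linux/ioport.h. */
struct resource {
	unsigned long start, end;
	const char *name;
	unsigned long flags;
	int desc;
};

#define IORES_DESC_NONE	0
#define IORESOURCE_IO	0x00000100	/* illustrative value */

#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc)	\
(struct resource) {							\
		.start = (_start),					\
		.end = (_start) + (_size) - 1,				\
		.name = (_name),					\
		.flags = (_flags),					\
		.desc = (_desc),					\
	}

#define DEFINE_RES_NAMED(_start, _size, _name, _flags)			\
	DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)

#define DEFINE_RES_IO_NAMED(_start, _size, _name)			\
	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)

int main(void)
{
	/* One expression fills every field consistently. */
	struct resource r = DEFINE_RES_IO_NAMED(0x3f8, 8, "serial");

	printf("%s: [%#lx-%#lx] flags=%#lx desc=%d\n",
	       r.name, r.start, r.end, r.flags, r.desc);
	return 0;
}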
@@ -203,6 +203,15 @@ static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
 }
 #endif

+#ifndef arch_check_excluded_range
+static inline int arch_check_excluded_range(struct kimage *image,
+					    unsigned long start,
+					    unsigned long end)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_KEXEC_SIG
 #ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
 int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
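This is the usual weak-default pattern: the generic header supplies a no-op unless an architecture defines the symbol first. A condensed single-file illustration of the mechanism (not the kernel's actual file layout; the excluded range is invented):

#include <stdio.h>

/* An architecture that wants a veto defines both the function and the
 * macro guard before the generic header is seen: */
static inline int arch_check_excluded_range(unsigned long start,
					    unsigned long end)
{
	return start < 0x1000;	/* hypothetical excluded low range */
}
#define arch_check_excluded_range arch_check_excluded_range

/* Generic code compiled afterwards keeps its zero-cost default only
 * when no override exists: */
#ifndef arch_check_excluded_range
static inline int arch_check_excluded_range(unsigned long start,
					    unsigned long end)
{
	return 0;	/* default: nothing excluded */
}
#endif

int main(void)
{
	printf("range ok: %d\n", !arch_check_excluded_range(0x500, 0x600));
	return 0;
}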
@@ -28,6 +28,7 @@ struct hlist_nulls_node {
 #define NULLS_MARKER(value) (1UL | (((long)value) << 1))
 #define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
 	((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
+#define HLIST_NULLS_HEAD_INIT(nulls) {.first = (struct hlist_nulls_node *)NULLS_MARKER(nulls)}

 #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
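The point of the new initializer is that an hlist_nulls head can now be filled in statically, which the ucount conversion later in this series uses for its hash table. A small standalone model (the `[0 ... N-1]` range initializer is a GNU C extension, used the same way in kernel code):

#include <stdio.h>

/* Minimal model of an hlist_nulls head. An empty head stores an odd
 * "nulls" marker instead of NULL, so lockless readers can tell which
 * chain they fell off. */
struct hlist_nulls_node { struct hlist_nulls_node *next, **pprev; };
struct hlist_nulls_head { struct hlist_nulls_node *first; };

#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define HLIST_NULLS_HEAD_INIT(nulls) \
	{ .first = (struct hlist_nulls_node *)NULLS_MARKER(nulls) }

#define TABLE_BITS 4
#define TABLE_ENTRIES (1 << TABLE_BITS)

/* Static table initialization, mirroring what kernel/ucount.c now does. */
static struct hlist_nulls_head table[TABLE_ENTRIES] = {
	[0 ... TABLE_ENTRIES - 1] = HLIST_NULLS_HEAD_INIT(0)
};

int main(void)
{
	/* The low bit marks the value as a nulls marker, not a node. */
	printf("empty head marker: %p\n", (void *)table[0].first);
	return 0;
}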
@@ -218,7 +218,7 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)

 /* Initialize a min-heap. */
 static __always_inline
-void __min_heap_init_inline(min_heap_char *heap, void *data, int size)
+void __min_heap_init_inline(min_heap_char *heap, void *data, size_t size)
 {
 	heap->nr = 0;
 	heap->size = size;
@@ -254,7 +254,7 @@ bool __min_heap_full_inline(min_heap_char *heap)

 /* Sift the element at pos down the heap. */
 static __always_inline
-void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down_inline(min_heap_char *heap, size_t pos, size_t elem_size,
 				 const struct min_heap_callbacks *func, void *args)
 {
 	const unsigned long lsbit = elem_size & -elem_size;
@@ -324,7 +324,7 @@ static __always_inline
 void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
 			      const struct min_heap_callbacks *func, void *args)
 {
-	int i;
+	ssize_t i;

 	for (i = heap->nr / 2 - 1; i >= 0; i--)
 		__min_heap_sift_down_inline(heap, i, elem_size, func, args);
@@ -379,7 +379,7 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele
 			    const struct min_heap_callbacks *func, void *args)
 {
 	void *data = heap->data;
-	int pos;
+	size_t pos;

 	if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
 		return false;
@@ -428,10 +428,10 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
 	__min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr),	\
 			      __minheap_obj_size(_heap), _idx, _func, _args)

-void __min_heap_init(min_heap_char *heap, void *data, int size);
+void __min_heap_init(min_heap_char *heap, void *data, size_t size);
 void *__min_heap_peek(struct min_heap_char *heap);
 bool __min_heap_full(min_heap_char *heap);
-void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
 			  const struct min_heap_callbacks *func, void *args);
 void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
 			const struct min_heap_callbacks *func, void *args);
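One detail worth noting: most indexes above become size_t, but __min_heapify_all_inline() uses ssize_t, because its loop counts down and must be able to leave the `i >= 0` condition. A toy program showing why an unsigned index would spin forever:

#include <stdio.h>
#include <sys/types.h>	/* ssize_t */

int main(void)
{
	size_t nr = 4;

	/* Signed index: i goes 1, 0, then -1 and the loop exits. */
	for (ssize_t i = nr / 2 - 1; i >= 0; i--)
		printf("sift down from %zd\n", i);

	/* With "size_t i" the condition i >= 0 is always true: the
	 * decrement past zero wraps to SIZE_MAX and never terminates. */
	return 0;
}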
@@ -202,4 +202,6 @@ DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
 DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
 DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)

+extern unsigned long mutex_get_owner(struct mutex *lock);
+
 #endif /* __LINUX_MUTEX_H */
@@ -177,16 +177,38 @@ void ctrl_alt_del(void);

 extern void orderly_poweroff(bool force);
 extern void orderly_reboot(void);
-void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
-
-static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
-{
-	__hw_protection_shutdown(reason, ms_until_forced, false);
-}
+
+/**
+ * enum hw_protection_action - Hardware protection action
+ *
+ * @HWPROT_ACT_DEFAULT:
+ *      The default action should be taken. This is HWPROT_ACT_SHUTDOWN
+ *      by default, but can be overridden.
+ * @HWPROT_ACT_SHUTDOWN:
+ *	The system should be shut down (powered off) for HW protection.
+ * @HWPROT_ACT_REBOOT:
+ *	The system should be rebooted for HW protection.
+ */
+enum hw_protection_action { HWPROT_ACT_DEFAULT, HWPROT_ACT_SHUTDOWN, HWPROT_ACT_REBOOT };

-static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+void __hw_protection_trigger(const char *reason, int ms_until_forced,
+			     enum hw_protection_action action);
+
+/**
+ * hw_protection_trigger - Trigger default emergency system hardware protection action
+ *
+ * @reason:		Reason of emergency shutdown or reboot to be printed.
+ * @ms_until_forced:	Time to wait for orderly shutdown or reboot before
+ *			triggering it. Negative value disables the forced
+ *			shutdown or reboot.
+ *
+ * Initiate an emergency system shutdown or reboot in order to protect
+ * hardware from further damage. The exact action taken is controllable at
+ * runtime and defaults to shutdown.
+ */
+static inline void hw_protection_trigger(const char *reason, int ms_until_forced)
 {
-	__hw_protection_shutdown(reason, ms_until_forced, true);
+	__hw_protection_trigger(reason, ms_until_forced, HWPROT_ACT_DEFAULT);
 }

 /*
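From a driver's point of view the new entry point is hw_protection_trigger(). A hypothetical caller might look like the sketch below; only hw_protection_trigger() comes from this header, the sensor callback and its threshold are invented for illustration:

/* Hypothetical driver snippet (kernel context, not standalone). */
#include <linux/reboot.h>

static void example_sensor_critical(void)
{
	/*
	 * Give the orderly path 3 seconds, then force whichever action
	 * the admin configured via the hw_protection= command line or
	 * /sys/kernel/reboot/hw_protection (shutdown by default).
	 */
	hw_protection_trigger("example sensor critical temperature", 3000);
}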
@@ -1259,7 +1259,7 @@ static inline int rhashtable_replace_fast(
 static inline void rhltable_walk_enter(struct rhltable *hlt,
 				       struct rhashtable_iter *iter)
 {
-	return rhashtable_walk_enter(&hlt->ht, iter);
+	rhashtable_walk_enter(&hlt->ht, iter);
 }

 /**
@@ -1275,12 +1275,12 @@ static inline void rhltable_free_and_destroy(struct rhltable *hlt,
 							     void *arg),
 					     void *arg)
 {
-	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+	rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
 }

 static inline void rhltable_destroy(struct rhltable *hlt)
 {
-	return rhltable_free_and_destroy(hlt, NULL, NULL);
+	rhltable_free_and_destroy(hlt, NULL, NULL);
 }

 #endif /* _LINUX_RHASHTABLE_H */
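All three fixes here (and the suspend_enable_secondary_cpus() one above) are the same issue: `return expr;` where expr has type void is a constraint violation in ISO C that some compilers merely warn about, which is how it crept in. A minimal illustration of the corrected form:

#include <stdio.h>

static void helper(void) { puts("work"); }

static void wrapper(void)
{
	helper();	/* was: return helper(); -- not valid ISO C */
}

int main(void)
{
	wrapper();
	return 0;
}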
@@ -1239,6 +1239,10 @@ struct task_struct {
 	struct mutex_waiter		*blocked_on;
 #endif

+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+	struct mutex			*blocker_mutex;
+#endif
+
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	int				non_block_count;
 #endif
@@ -92,6 +92,7 @@ typedef unsigned char		unchar;
 typedef unsigned short		ushort;
 typedef unsigned int		uint;
 typedef unsigned long		ulong;
+typedef unsigned long long	ullong;

 #ifndef __BIT_TYPES_DEFINED__
 #define __BIT_TYPES_DEFINED__
@@ -5,8 +5,10 @@
 #include <linux/kref.h>
 #include <linux/nsproxy.h>
 #include <linux/ns_common.h>
+#include <linux/rculist_nulls.h>
 #include <linux/sched.h>
 #include <linux/workqueue.h>
+#include <linux/rcuref.h>
 #include <linux/rwsem.h>
 #include <linux/sysctl.h>
 #include <linux/err.h>
@@ -115,10 +117,11 @@ struct user_namespace {
 } __randomize_layout;

 struct ucounts {
-	struct hlist_node node;
+	struct hlist_nulls_node node;
 	struct user_namespace *ns;
 	kuid_t uid;
-	atomic_t count;
+	struct rcu_head rcu;
+	rcuref_t count;
 	atomic_long_t ucount[UCOUNT_COUNTS];
 	atomic_long_t rlimit[UCOUNT_RLIMIT_COUNTS];
 };
@@ -131,9 +134,15 @@ void retire_userns_sysctls(struct user_namespace *ns);
 struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
 void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
-struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
 void put_ucounts(struct ucounts *ucounts);

+static inline struct ucounts * __must_check get_ucounts(struct ucounts *ucounts)
+{
+	if (rcuref_get(&ucounts->count))
+		return ucounts;
+	return NULL;
+}
+
 static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type type)
 {
 	return atomic_long_read(&ucounts->rlimit[type]);
@@ -275,6 +275,7 @@ struct vfs_ns_cap_data {
 /* Allow setting encryption key on loopback filesystem */
 /* Allow setting zone reclaim policy */
 /* Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility */
+/* Allow setting hardware protection emergency action */

 #define CAP_SYS_ADMIN        21

@@ -375,8 +375,7 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
 	return 0;
 }

-void __init reserve_crashkernel_generic(char *cmdline,
-			     unsigned long long crash_size,
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
 					unsigned long long crash_base,
 					unsigned long long crash_low_size,
 					bool high)
@@ -1582,6 +1582,17 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);

+static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode)
+{
+	if (mm == current->mm)
+		return true;
+	if (ptrace_may_access(task, mode))
+		return true;
+	if ((mode & PTRACE_MODE_READ) && perfmon_capable())
+		return true;
+	return false;
+}
+
 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 {
 	struct mm_struct *mm;
@@ -1594,7 +1605,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 	mm = get_task_mm(task);
 	if (!mm) {
 		mm = ERR_PTR(-ESRCH);
-	} else if (mm != current->mm && !ptrace_may_access(task, mode)) {
+	} else if (!may_access_mm(mm, task, mode)) {
 		mmput(mm);
 		mm = ERR_PTR(-EACCES);
 	}
@@ -93,6 +93,43 @@ static struct notifier_block panic_block = {
 	.notifier_call = hung_task_panic,
 };

+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+static void debug_show_blocker(struct task_struct *task)
+{
+	struct task_struct *g, *t;
+	unsigned long owner;
+	struct mutex *lock;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "No rcu lock held");
+
+	lock = READ_ONCE(task->blocker_mutex);
+	if (!lock)
+		return;
+
+	owner = mutex_get_owner(lock);
+	if (unlikely(!owner)) {
+		pr_err("INFO: task %s:%d is blocked on a mutex, but the owner is not found.\n",
+			task->comm, task->pid);
+		return;
+	}
+
+	/* Ensure the owner information is correct. */
+	for_each_process_thread(g, t) {
+		if ((unsigned long)t == owner) {
+			pr_err("INFO: task %s:%d is blocked on a mutex likely owned by task %s:%d.\n",
+				task->comm, task->pid, t->comm, t->pid);
+			sched_show_task(t);
+			return;
+		}
+	}
+}
+#else
+static inline void debug_show_blocker(struct task_struct *task)
+{
+}
+#endif
+
 static void check_hung_task(struct task_struct *t, unsigned long timeout)
 {
 	unsigned long switch_count = t->nvcsw + t->nivcsw;
@@ -152,6 +189,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 		pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
 			" disables this message.\n");
 		sched_show_task(t);
+		debug_show_blocker(t);
 		hung_task_show_lock = true;

 		if (sysctl_hung_task_all_cpu_backtrace)
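The structure of debug_show_blocker() - read the published lock, resolve its owner, then validate that owner against the live task list before printing - can be modelled in a few lines of userspace C. Everything below is an illustrative model, not kernel API:

#include <stdio.h>

struct lock;
struct task { int pid; const char *comm; struct lock *waiting_on; };
struct lock { struct task *owner; };

/* Validate the stored owner against the task list, as
 * debug_show_blocker() does with for_each_process_thread(),
 * before trusting and printing it. */
static void show_blocker(struct task *tasks, int ntasks, struct task *hung)
{
	struct lock *lk = hung->waiting_on;

	if (!lk || !lk->owner)
		return;
	for (int i = 0; i < ntasks; i++)
		if (&tasks[i] == lk->owner) {
			printf("task %s:%d blocked on a lock owned by %s:%d\n",
			       hung->comm, hung->pid,
			       tasks[i].comm, tasks[i].pid);
			return;
		}
}

int main(void)
{
	struct lock l;
	struct task tasks[2] = {
		{ 1, "owner", NULL }, { 2, "waiter", &l },
	};

	l.owner = &tasks[0];
	show_blocker(tasks, 2, &tasks[1]);
	return 0;
}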
@@ -210,6 +210,16 @@ int sanity_check_segment_list(struct kimage *image)
 	}
 #endif

+	/*
+	 * The destination addresses are searched from system RAM rather than
+	 * being allocated from the buddy allocator, so they are not guaranteed
+	 * to be accepted by the current kernel.  Accept the destination
+	 * addresses before kexec swaps their content with the segments' source
+	 * pages to avoid accessing memory before it is accepted.
+	 */
+	for (i = 0; i < nr_segments; i++)
+		accept_memory(image->segment[i].mem, image->segment[i].memsz);
+
 	return 0;
 }

@@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
 			 struct kexec_buf *kbuf,
 			 unsigned long *lowest_load_addr)
 {
-	unsigned long lowest_addr = UINT_MAX;
+	unsigned long lowest_addr = ULONG_MAX;
 	int ret;
 	size_t i;

@@ -464,6 +464,12 @@ static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
 			continue;
 		}

+		/* Make sure this does not conflict with exclude range */
+		if (arch_check_excluded_range(image, temp_start, temp_end)) {
+			temp_start = temp_start - PAGE_SIZE;
+			continue;
+		}
+
 		/* We found a suitable memory range */
 		break;
 	} while (1);
@@ -498,6 +504,12 @@ static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
 			continue;
 		}

+		/* Make sure this does not conflict with exclude range */
+		if (arch_check_excluded_range(image, temp_start, temp_end)) {
+			temp_start = temp_start + PAGE_SIZE;
+			continue;
+		}
+
 		/* We found a suitable memory range */
 		break;
 	} while (1);
@@ -72,6 +72,14 @@ static inline unsigned long __owner_flags(unsigned long owner)
 	return owner & MUTEX_FLAGS;
 }

+/* Do not use the return value as a pointer directly. */
+unsigned long mutex_get_owner(struct mutex *lock)
+{
+	unsigned long owner = atomic_long_read(&lock->owner);
+
+	return (unsigned long)__owner_task(owner);
+}
+
 /*
  * Returns: __mutex_owner(lock) on failure or NULL on success.
  */
@@ -182,6 +190,9 @@ static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+	WRITE_ONCE(current->blocker_mutex, lock);
+#endif
 	debug_mutex_add_waiter(lock, waiter, current);

 	list_add_tail(&waiter->list, list);
@@ -197,6 +208,9 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 		__mutex_clear_flag(lock, MUTEX_FLAGS);

 	debug_mutex_remove_waiter(lock, waiter, current);
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+	WRITE_ONCE(current->blocker_mutex, NULL);
+#endif
 }

 /*
 kernel/reboot.c | 140
@@ -36,6 +36,8 @@ enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 EXPORT_SYMBOL_GPL(reboot_mode);
 enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;

+static enum hw_protection_action hw_protection_action = HWPROT_ACT_SHUTDOWN;
+
 /*
  * This variable is used privately to keep track of whether or not
  * reboot_type is still set to its default value (i.e., reboot= hasn't
@@ -229,6 +231,9 @@ EXPORT_SYMBOL(unregister_restart_handler);
 /**
  *	do_kernel_restart - Execute kernel restart handler call chain
  *
+ *	@cmd: pointer to buffer containing command to execute for restart
+ *		or %NULL
+ *
  *	Calls functions registered with register_restart_handler.
  *
  *	Expected to be called from machine_restart as last step of the restart
@@ -933,61 +938,86 @@ void orderly_reboot(void)
 }
 EXPORT_SYMBOL_GPL(orderly_reboot);

+static const char *hw_protection_action_str(enum hw_protection_action action)
+{
+	switch (action) {
+	case HWPROT_ACT_SHUTDOWN:
+		return "shutdown";
+	case HWPROT_ACT_REBOOT:
+		return "reboot";
+	default:
+		return "undefined";
+	}
+}
+
+static enum hw_protection_action hw_failure_emergency_action;
+
 /**
- * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
- * @work: work_struct associated with the emergency poweroff function
+ * hw_failure_emergency_action_func - emergency action work after a known delay
+ * @work: work_struct associated with the emergency action function
  *
  * This function is called in very critical situations to force
- * a kernel poweroff after a configurable timeout value.
+ * a kernel poweroff or reboot after a configurable timeout value.
  */
-static void hw_failure_emergency_poweroff_func(struct work_struct *work)
+static void hw_failure_emergency_action_func(struct work_struct *work)
 {
+	const char *action_str = hw_protection_action_str(hw_failure_emergency_action);
+
+	pr_emerg("Hardware protection timed-out. Trying forced %s\n",
+		 action_str);
+
 	/*
-	 * We have reached here after the emergency shutdown waiting period has
-	 * expired. This means orderly_poweroff has not been able to shut off
-	 * the system for some reason.
+	 * We have reached here after the emergency action waiting period has
+	 * expired. This means orderly_poweroff/reboot has not been able to
+	 * shut off the system for some reason.
 	 *
-	 * Try to shut down the system immediately using kernel_power_off
-	 * if populated
+	 * Try to shut off the system immediately if possible
 	 */
-	pr_emerg("Hardware protection timed-out. Trying forced poweroff\n");
-	kernel_power_off();
+
+	if (hw_failure_emergency_action == HWPROT_ACT_REBOOT)
+		kernel_restart(NULL);
+	else
+		kernel_power_off();

 	/*
 	 * Worst of the worst case trigger emergency restart
 	 */
-	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
+	pr_emerg("Hardware protection %s failed. Trying emergency restart\n",
+		 action_str);
 	emergency_restart();
 }

-static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
-			    hw_failure_emergency_poweroff_func);
+static DECLARE_DELAYED_WORK(hw_failure_emergency_action_work,
+			    hw_failure_emergency_action_func);

 /**
- * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
+ * hw_failure_emergency_schedule - Schedule an emergency system shutdown or reboot
+ *
+ * @action:		The hardware protection action to be taken
+ * @action_delay_ms:	Time in milliseconds to elapse before triggering action
  *
  * This may be called from any critical situation to trigger a system shutdown
- * after a given period of time. If time is negative this is not scheduled.
+ * or reboot after a given period of time.
+ * If time is negative this is not scheduled.
  */
-static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
+static void hw_failure_emergency_schedule(enum hw_protection_action action,
+					  int action_delay_ms)
 {
-	if (poweroff_delay_ms <= 0)
+	if (action_delay_ms <= 0)
 		return;
-	schedule_delayed_work(&hw_failure_emergency_poweroff_work,
-			      msecs_to_jiffies(poweroff_delay_ms));
+	hw_failure_emergency_action = action;
+	schedule_delayed_work(&hw_failure_emergency_action_work,
+			      msecs_to_jiffies(action_delay_ms));
 }

 /**
- * __hw_protection_shutdown - Trigger an emergency system shutdown or reboot
+ * __hw_protection_trigger - Trigger an emergency system shutdown or reboot
  *
  * @reason:		Reason of emergency shutdown or reboot to be printed.
  * @ms_until_forced:	Time to wait for orderly shutdown or reboot before
  *			triggering it. Negative value disables the forced
  *			shutdown or reboot.
- * @shutdown:		If true, indicates that a shutdown will happen
- *			after the critical tempeature is reached.
- *			If false, indicates that a reboot will happen
- *			after the critical tempeature is reached.
+ * @action:		The hardware protection action to be taken.
  *
  * Initiate an emergency system shutdown or reboot in order to protect
  * hardware from further damage. Usage examples include a thermal protection.
@@ -995,11 +1025,16 @@ static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
  * pending even if the previous request has given a large timeout for forced
  * shutdown/reboot.
  */
-void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown)
+void __hw_protection_trigger(const char *reason, int ms_until_forced,
+			     enum hw_protection_action action)
 {
 	static atomic_t allow_proceed = ATOMIC_INIT(1);

-	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
+	if (action == HWPROT_ACT_DEFAULT)
+		action = hw_protection_action;
+
+	pr_emerg("HARDWARE PROTECTION %s (%s)\n",
+		 hw_protection_action_str(action), reason);

 	/* Shutdown should be initiated only once. */
 	if (!atomic_dec_and_test(&allow_proceed))
@@ -1009,13 +1044,55 @@ void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shut
 	 * Queue a backup emergency shutdown in the event of
 	 * orderly_poweroff failure
 	 */
-	hw_failure_emergency_poweroff(ms_until_forced);
-	if (shutdown)
-		orderly_poweroff(true);
-	else
+	hw_failure_emergency_schedule(action, ms_until_forced);
+	if (action == HWPROT_ACT_REBOOT)
 		orderly_reboot();
+	else
+		orderly_poweroff(true);
 }
-EXPORT_SYMBOL_GPL(__hw_protection_shutdown);
+EXPORT_SYMBOL_GPL(__hw_protection_trigger);
+
+static bool hw_protection_action_parse(const char *str,
+				       enum hw_protection_action *action)
+{
+	if (sysfs_streq(str, "shutdown"))
+		*action = HWPROT_ACT_SHUTDOWN;
+	else if (sysfs_streq(str, "reboot"))
+		*action = HWPROT_ACT_REBOOT;
+	else
+		return false;
+
+	return true;
+}
+
+static int __init hw_protection_setup(char *str)
+{
+	hw_protection_action_parse(str, &hw_protection_action);
+	return 1;
+}
+__setup("hw_protection=", hw_protection_setup);
+
+#ifdef CONFIG_SYSFS
+static ssize_t hw_protection_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%s\n",
+			  hw_protection_action_str(hw_protection_action));
+}
+static ssize_t hw_protection_store(struct kobject *kobj,
+				   struct kobj_attribute *attr, const char *buf,
+				   size_t count)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!hw_protection_action_parse(buf, &hw_protection_action))
+		return -EINVAL;
+
+	return count;
+}
+static struct kobj_attribute hw_protection_attr = __ATTR_RW(hw_protection);
+#endif
+
 static int __init reboot_setup(char *str)
 {
@@ -1276,6 +1353,7 @@ static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
 #endif

 static struct attribute *reboot_attrs[] = {
+	&hw_protection_attr.attr,
 	&reboot_mode_attr.attr,
 #ifdef CONFIG_X86
 	&reboot_force_attr.attr,
@@ -351,10 +351,9 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
 	struct dentry *dentry;
 	char *tmpname;

-	tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
+	tmpname = kasprintf(GFP_KERNEL, "%s%d", chan->base_filename, cpu);
 	if (!tmpname)
 		return NULL;
-	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);

 	/* Create file in fs */
 	dentry = chan->cb->create_buf_file(tmpname, chan->parent,
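The userspace analogue of this change is asprintf(3), which likewise sizes, allocates, and formats in one call instead of a fixed-size allocation followed by snprintf():

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *tmpname;

	/* One call allocates exactly enough and formats into it. */
	if (asprintf(&tmpname, "%s%d", "cpu", 3) < 0)
		return 1;
	printf("%s\n", tmpname);	/* prints "cpu3" */
	free(tmpname);
	return 0;
}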
@@ -561,8 +561,7 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
 	struct resource res, o;
 	bool covered;

-	res.start = start;
-	res.end = start + size - 1;
+	res = DEFINE_RES(start, size, 0);

 	for (p = parent->child; p ; p = p->sibling) {
 		if (!resource_intersection(p, &res, &o))
@@ -1714,18 +1713,13 @@ static int __init reserve_setup(char *str)
 			 * I/O port space; otherwise assume it's memory.
 			 */
 			if (io_start < 0x10000) {
-				res->flags = IORESOURCE_IO;
+				*res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved");
 				parent = &ioport_resource;
 			} else {
-				res->flags = IORESOURCE_MEM;
+				*res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved");
 				parent = &iomem_resource;
 			}
-			res->name = "reserved";
-			res->start = io_start;
-			res->end = io_start + io_num - 1;
 			res->flags |= IORESOURCE_BUSY;
-			res->desc = IORES_DESC_NONE;
-			res->child = NULL;
 			if (request_resource(parent, res) == 0)
 				reserved = x+1;
 		}
@@ -1975,11 +1969,7 @@ get_free_mem_region(struct device *dev, struct resource *base,
 			 */
 			revoke_iomem(res);
 		} else {
-			res->start = addr;
-			res->end = addr + size - 1;
-			res->name = name;
-			res->desc = desc;
-			res->flags = IORESOURCE_MEM;
+			*res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc);

 			/*
 			 * Only succeed if the resource hosts an exclusive
@@ -176,9 +176,10 @@ static bool recalc_sigpending_tsk(struct task_struct *t)

 void recalc_sigpending(void)
 {
-	if (!recalc_sigpending_tsk(current) && !freezing(current))
+	if (!recalc_sigpending_tsk(current) && !freezing(current)) {
+		if (unlikely(test_thread_flag(TIF_SIGPENDING)))
 			clear_thread_flag(TIF_SIGPENDING);
-
+	}
 }
 EXPORT_SYMBOL(recalc_sigpending);

@@ -11,11 +11,14 @@
 struct ucounts init_ucounts = {
 	.ns    = &init_user_ns,
 	.uid   = GLOBAL_ROOT_UID,
-	.count = ATOMIC_INIT(1),
+	.count = RCUREF_INIT(1),
 };

 #define UCOUNTS_HASHTABLE_BITS 10
-static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
+#define UCOUNTS_HASHTABLE_ENTRIES (1 << UCOUNTS_HASHTABLE_BITS)
+static struct hlist_nulls_head ucounts_hashtable[UCOUNTS_HASHTABLE_ENTRIES] = {
+	[0 ... UCOUNTS_HASHTABLE_ENTRIES - 1] = HLIST_NULLS_HEAD_INIT(0)
+};
 static DEFINE_SPINLOCK(ucounts_lock);

 #define ucounts_hashfn(ns, uid)						\
@@ -24,7 +27,6 @@ static DEFINE_SPINLOCK(ucounts_lock);
 #define ucounts_hashentry(ns, uid)	\
 	(ucounts_hashtable + ucounts_hashfn(ns, uid))

-
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_set *
 set_lookup(struct ctl_table_root *root)
@@ -127,50 +129,39 @@ void retire_userns_sysctls(struct user_namespace *ns)
 #endif
 }

-static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
+static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid,
+				    struct hlist_nulls_head *hashent)
 {
 	struct ucounts *ucounts;
+	struct hlist_nulls_node *pos;

-	hlist_for_each_entry(ucounts, hashent, node) {
-		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
-			return ucounts;
+	guard(rcu)();
+	hlist_nulls_for_each_entry_rcu(ucounts, pos, hashent, node) {
+		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns)) {
+			if (rcuref_get(&ucounts->count))
+				return ucounts;
+		}
 	}
 	return NULL;
 }

 static void hlist_add_ucounts(struct ucounts *ucounts)
 {
-	struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
+	struct hlist_nulls_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);

 	spin_lock_irq(&ucounts_lock);
-	hlist_add_head(&ucounts->node, hashent);
+	hlist_nulls_add_head_rcu(&ucounts->node, hashent);
 	spin_unlock_irq(&ucounts_lock);
 }

-static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
-{
-	/* Returns true on a successful get, false if the count wraps. */
-	return !atomic_add_negative(1, &ucounts->count);
-}
-
-struct ucounts *get_ucounts(struct ucounts *ucounts)
-{
-	if (!get_ucounts_or_wrap(ucounts)) {
-		put_ucounts(ucounts);
-		ucounts = NULL;
-	}
-	return ucounts;
-}
-
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
-	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
-	bool wrapped;
-	struct ucounts *ucounts, *new = NULL;
+	struct hlist_nulls_head *hashent = ucounts_hashentry(ns, uid);
+	struct ucounts *ucounts, *new;

-	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
-	if (!ucounts) {
-		spin_unlock_irq(&ucounts_lock);
+	if (ucounts)
 		return ucounts;

 	new = kzalloc(sizeof(*new), GFP_KERNEL);
 	if (!new)
@@ -178,37 +169,33 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)

 	new->ns = ns;
 	new->uid = uid;
-	atomic_set(&new->count, 1);
+	rcuref_init(&new->count, 1);

 	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
-	if (!ucounts) {
-		hlist_add_head(&new->node, hashent);
+	if (ucounts) {
+		spin_unlock_irq(&ucounts_lock);
+		kfree(new);
+		return ucounts;
+	}
+
+	hlist_nulls_add_head_rcu(&new->node, hashent);
 	get_user_ns(new->ns);
 	spin_unlock_irq(&ucounts_lock);
 	return new;
-		}
-	}
-
-	wrapped = !get_ucounts_or_wrap(ucounts);
-	spin_unlock_irq(&ucounts_lock);
-	kfree(new);
-	if (wrapped) {
-		put_ucounts(ucounts);
-		return NULL;
-	}
-	return ucounts;
 }

 void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;

-	if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
-		hlist_del_init(&ucounts->node);
+	if (rcuref_put(&ucounts->count)) {
+		spin_lock_irqsave(&ucounts_lock, flags);
+		hlist_nulls_del_rcu(&ucounts->node);
 		spin_unlock_irqrestore(&ucounts_lock, flags);

 		put_user_ns(ucounts->ns);
-		kfree(ucounts);
+		kfree_rcu(ucounts, rcu);
 	}
 }
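The lookup idiom introduced here - walk the chain under RCU and only hand out the object if its refcount can still be raised - is worth isolating. A condensed kernel-style sketch of the pattern, using only calls that appear in the hunks above (illustrative, not a complete translation unit):

/* Find the object under RCU and only return it if the refcount can
 * still be raised. rcuref_get() fails once the count has dropped to
 * zero, so an object already on its way to kfree_rcu() cannot be
 * resurrected; the nulls list lets the lockless walk detect that it
 * fell off onto another chain. */
struct obj { struct hlist_nulls_node node; rcuref_t count; int key; };

static struct obj *lookup_get(struct hlist_nulls_head *head, int key)
{
	struct obj *o;
	struct hlist_nulls_node *pos;

	guard(rcu)();	/* rcu_read_lock() for this scope */
	hlist_nulls_for_each_entry_rcu(o, pos, head, node) {
		if (o->key == key && rcuref_get(&o->count))
			return o;	/* reference taken */
	}
	return NULL;		/* not found or already dying */
}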
@@ -269,12 +269,10 @@ void __init hardlockup_config_perf_event(const char *str)
 	} else {
 		unsigned int len = comma - str;

-		if (len >= sizeof(buf))
+		if (len > sizeof(buf))
 			return;

-		if (strscpy(buf, str, sizeof(buf)) < 0)
-			return;
-		buf[len] = 0;
+		strscpy(buf, str, len);
 		if (kstrtoull(buf, 16, &config))
 			return;
 	}
@@ -1280,6 +1280,17 @@ config BOOTPARAM_HUNG_TASK_PANIC

 	  Say N if unsure.

+config DETECT_HUNG_TASK_BLOCKER
+	bool "Dump Hung Tasks Blocker"
+	depends on DETECT_HUNG_TASK
+	depends on !PREEMPT_RT
+	default y
+	help
+	  Say Y here to show the blocker task's stacktrace who acquires
+	  the mutex lock which "hung tasks" are waiting.
+	  This will add overhead a bit but shows suspicious tasks and
+	  call trace if it comes from waiting a mutex.
+
 config WQ_WATCHDOG
 	bool "Detect Workqueue Stalls"
 	depends on DEBUG_KERNEL
@@ -20,9 +20,15 @@ EXPORT_SYMBOL_GPL(interval_tree_iter_next);
 /*
  * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
  * span of nodes. This makes nodes[0]->last the end of that contiguous used span
- * indexes that started at the original nodes[1]->start. nodes[1] is now the
- * first node starting the next used span. A hole span is between nodes[0]->last
- * and nodes[1]->start. nodes[1] must be !NULL.
+ * of indexes that started at the original nodes[1]->start.
+ *
+ * If there is an interior hole, nodes[1] is now the first node starting the
+ * next used span. A hole span is between nodes[0]->last and nodes[1]->start.
+ *
+ * If there is a tailing hole, nodes[1] is now NULL. A hole span is between
+ * nodes[0]->last and last_index.
+ *
+ * If the contiguous used range span to last_index, nodes[1] is set to NULL.
 */
 static void
 interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
			@ -5,6 +5,8 @@
 | 
			
		|||
#include <linux/prandom.h>
 | 
			
		||||
#include <linux/slab.h>
 | 
			
		||||
#include <asm/timex.h>
 | 
			
		||||
#include <linux/bitmap.h>
 | 
			
		||||
#include <linux/maple_tree.h>
 | 
			
		||||
 | 
			
		||||
#define __param(type, name, init, msg)		\
 | 
			
		||||
	static type name = init;		\
 | 
			
		||||
| 
						 | 
				
			
			@ -19,6 +21,7 @@ __param(int, search_loops, 1000, "Number of iterations searching the tree");
 | 
			
		|||
__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
 | 
			
		||||
 | 
			
		||||
__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
 | 
			
		||||
__param(ullong, seed, 3141592653589793238ULL, "Random seed");
 | 
			
		||||
 | 
			
		||||
static struct rb_root_cached root = RB_ROOT_CACHED;
 | 
			
		||||
static struct interval_tree_node *nodes = NULL;
 | 
			
		||||
| 
						 | 
				
			
			@ -59,26 +62,13 @@ static void init(void)
 | 
			
		|||
		queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int interval_tree_test_init(void)
 | 
			
		||||
static int basic_check(void)
 | 
			
		||||
{
 | 
			
		||||
	int i, j;
 | 
			
		||||
	unsigned long results;
 | 
			
		||||
	cycles_t time1, time2, time;
 | 
			
		||||
 | 
			
		||||
	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
 | 
			
		||||
			      GFP_KERNEL);
 | 
			
		||||
	if (!nodes)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
	queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
 | 
			
		||||
	if (!queries) {
 | 
			
		||||
		kfree(nodes);
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	printk(KERN_ALERT "interval tree insert/remove");
 | 
			
		||||
 | 
			
		||||
	prandom_seed_state(&rnd, 3141592653589793238ULL);
 | 
			
		||||
	init();
 | 
			
		||||
 | 
			
		||||
	time1 = get_cycles();
 | 
			
		||||
| 
						 | 
				
			
			@ -96,8 +86,19 @@ static int interval_tree_test_init(void)
 | 
			
		|||
	time = div_u64(time, perf_loops);
 | 
			
		||||
	printk(" -> %llu cycles\n", (unsigned long long)time);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int search_check(void)
 | 
			
		||||
{
 | 
			
		||||
	int i, j;
 | 
			
		||||
	unsigned long results;
 | 
			
		||||
	cycles_t time1, time2, time;
 | 
			
		||||
 | 
			
		||||
	printk(KERN_ALERT "interval tree search");
 | 
			
		||||
 | 
			
		||||
	init();
 | 
			
		||||
 | 
			
		||||
	for (j = 0; j < nnodes; j++)
 | 
			
		||||
		interval_tree_insert(nodes + j, &root);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -120,6 +121,214 @@ static int interval_tree_test_init(void)
 | 
			
		|||
	printk(" -> %llu cycles (%lu results)\n",
 | 
			
		||||
	       (unsigned long long)time, results);
 | 
			
		||||
 | 
			
		||||
	for (j = 0; j < nnodes; j++)
 | 
			
		||||
		interval_tree_remove(nodes + j, &root);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int intersection_range_check(void)
 | 
			
		||||
{
 | 
			
		||||
	int i, j, k;
 | 
			
		||||
	unsigned long start, last;
 | 
			
		||||
	struct interval_tree_node *node;
 | 
			
		||||
	unsigned long *intxn1;
 | 
			
		||||
	unsigned long *intxn2;
 | 
			
		||||
 | 
			
		||||
	printk(KERN_ALERT "interval tree iteration\n");
 | 
			
		||||
 | 
			
		||||
	intxn1 = bitmap_alloc(nnodes, GFP_KERNEL);
 | 
			
		||||
	if (!intxn1) {
 | 
			
		||||
		WARN_ON_ONCE("Failed to allocate intxn1\n");
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	intxn2 = bitmap_alloc(nnodes, GFP_KERNEL);
 | 
			
		||||
	if (!intxn2) {
 | 
			
		||||
		WARN_ON_ONCE("Failed to allocate intxn2\n");
 | 
			
		||||
		bitmap_free(intxn1);
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for (i = 0; i < search_loops; i++) {
 | 
			
		||||
		/* Initialize interval tree for each round */
 | 
			
		||||
		init();
 | 
			
		||||
		for (j = 0; j < nnodes; j++)
 | 
			
		||||
			interval_tree_insert(nodes + j, &root);
 | 
			
		||||
 | 
			
		||||
		/* Let's try nsearches different ranges */
 | 
			
		||||
		for (k = 0; k < nsearches; k++) {
 | 
			
		||||
			/* Try whole range once */
 | 
			
		||||
			if (!k) {
 | 
			
		||||
				start = 0UL;
 | 
			
		||||
				last = ULONG_MAX;
 | 
			
		||||
			} else {
 | 
			
		||||
				last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
 | 
			
		||||
				start = (prandom_u32_state(&rnd) >> 4) % last;
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			/* Walk nodes to mark intersection nodes */
 | 
			
		||||
			bitmap_zero(intxn1, nnodes);
 | 
			
		||||
			for (j = 0; j < nnodes; j++) {
 | 
			
		||||
				node = nodes + j;
 | 
			
		||||
 | 
			
		||||
				if (start <= node->last && last >= node->start)
 | 
			
		||||
					bitmap_set(intxn1, j, 1);
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			/* Iterate tree to clear intersection nodes */
 | 
			
		||||
			bitmap_zero(intxn2, nnodes);
 | 
			
		||||
			for (node = interval_tree_iter_first(&root, start, last); node;
 | 
			
		||||
			     node = interval_tree_iter_next(node, start, last))
 | 
			
		||||
				bitmap_set(intxn2, node - nodes, 1);
 | 
			
		||||
 | 
			
		||||
			WARN_ON_ONCE(!bitmap_equal(intxn1, intxn2, nnodes));
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		for (j = 0; j < nnodes; j++)
 | 
			
		||||
			interval_tree_remove(nodes + j, &root);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	bitmap_free(intxn1);
 | 
			
		||||
	bitmap_free(intxn2);
 | 
			
		||||
	return 0;
 | 
			
		||||
}

#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
/*
 * Helper function to get span of current position from maple tree point of
 * view.
 */
static void mas_cur_span(struct ma_state *mas, struct interval_tree_span_iter *state)
{
	unsigned long cur_start;
	unsigned long cur_last;
	int is_hole;

	if (mas->status == ma_overflow)
		return;

	/* walk to current position */
	state->is_hole = mas_walk(mas) ? 0 : 1;

	cur_start = mas->index < state->first_index ?
			state->first_index : mas->index;

	/* whether we have followers */
	do {

		cur_last = mas->last > state->last_index ?
				state->last_index : mas->last;

		is_hole = mas_next_range(mas, state->last_index) ? 0 : 1;

	} while (mas->status != ma_overflow && is_hole == state->is_hole);

	if (state->is_hole) {
		state->start_hole = cur_start;
		state->last_hole = cur_last;
	} else {
		state->start_used = cur_start;
		state->last_used = cur_last;
	}

	/* advance position for next round */
	if (mas->status != ma_overflow)
		mas_set(mas, cur_last + 1);
}

static int span_iteration_check(void)
{
	int i, j, k;
	unsigned long start, last;
	struct interval_tree_span_iter span, mas_span;

	DEFINE_MTREE(tree);

	MA_STATE(mas, &tree, 0, 0);

	printk(KERN_ALERT "interval tree span iteration\n");

	for (i = 0; i < search_loops; i++) {
		/* Initialize interval tree for each round */
		init();
		for (j = 0; j < nnodes; j++)
			interval_tree_insert(nodes + j, &root);

		/* Put all the range into maple tree */
		mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
		mt_set_in_rcu(&tree);

		for (j = 0; j < nnodes; j++)
			WARN_ON_ONCE(mtree_store_range(&tree, nodes[j].start,
					nodes[j].last, nodes + j, GFP_KERNEL));

		/* Let's try nsearches different ranges */
		for (k = 0; k < nsearches; k++) {
			/* Try whole range once */
			if (!k) {
				start = 0UL;
				last = ULONG_MAX;
			} else {
				last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
				start = (prandom_u32_state(&rnd) >> 4) % last;
			}

			mas_span.first_index = start;
			mas_span.last_index = last;
			mas_span.is_hole = -1;
			mas_set(&mas, start);

			interval_tree_for_each_span(&span, &root, start, last) {
				mas_cur_span(&mas, &mas_span);

				WARN_ON_ONCE(span.is_hole != mas_span.is_hole);

				if (span.is_hole) {
					WARN_ON_ONCE(span.start_hole != mas_span.start_hole);
					WARN_ON_ONCE(span.last_hole != mas_span.last_hole);
				} else {
					WARN_ON_ONCE(span.start_used != mas_span.start_used);
					WARN_ON_ONCE(span.last_used != mas_span.last_used);
				}
			}

		}

		WARN_ON_ONCE(mas.status != ma_overflow);

		/* Cleanup maple tree for each round */
		mtree_destroy(&tree);
		/* Cleanup interval tree for each round */
		for (j = 0; j < nnodes; j++)
			interval_tree_remove(nodes + j, &root);
	}
	return 0;
}
#else
static inline int span_iteration_check(void) {return 0; }
#endif
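For readers unfamiliar with the iterator being validated here: interval_tree_for_each_span() reports the query window [first_index, last_index] as an alternating sequence of "used" spans (covered by at least one node) and "hole" spans (covered by none), which is what mas_cur_span() recomputes from the maple-tree side. A conceptual userspace sketch of that decomposition -- an illustration only, assuming a sorted array of pre-merged, non-overlapping used ranges instead of the kernel's tree types:

#include <stdio.h>

struct range { unsigned long start, last; };

/* Print used/hole spans of [first, last], given merged, sorted used ranges. */
static void print_spans(const struct range *used, int n,
			unsigned long first, unsigned long last)
{
	unsigned long pos = first;

	for (int i = 0; i < n && pos <= last; i++) {
		if (used[i].last < pos)
			continue;		/* entirely before the window */
		if (used[i].start > last)
			break;			/* entirely after the window */
		if (used[i].start > pos) {	/* hole before this range */
			printf("hole [%lu, %lu]\n", pos, used[i].start - 1);
			pos = used[i].start;
		}
		unsigned long end = used[i].last < last ? used[i].last : last;

		printf("used [%lu, %lu]\n", pos, end);
		if (end == last)
			return;
		pos = end + 1;
	}
	if (pos <= last)
		printf("hole [%lu, %lu]\n", pos, last);
}

int main(void)
{
	struct range used[] = { { 5, 9 }, { 15, 15 } };

	/* prints: hole [0,4] used [5,9] hole [10,14] used [15,15] hole [16,20] */
	print_spans(used, 2, 0, 20);
	return 0;
}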

static int interval_tree_test_init(void)
{
	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
			      GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
	if (!queries) {
		kfree(nodes);
		return -ENOMEM;
	}

	prandom_seed_state(&rnd, seed);

	basic_check();
	search_check();
	intersection_range_check();
	span_iteration_check();

	kfree(queries);
	kfree(nodes);

lib/min_heap.c
@ -2,7 +2,7 @@
 #include <linux/export.h>
 #include <linux/min_heap.h>
 
-void __min_heap_init(min_heap_char *heap, void *data, int size)
+void __min_heap_init(min_heap_char *heap, void *data, size_t size)
 {
 	__min_heap_init_inline(heap, data, size);
 }
@ -20,7 +20,7 @@ bool __min_heap_full(min_heap_char *heap)
 }
 EXPORT_SYMBOL(__min_heap_full);
 
-void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
 			  const struct min_heap_callbacks *func, void *args)
 {
 	__min_heap_sift_down_inline(heap, pos, elem_size, func, args);

lib/plist.c | 12
@ -171,12 +171,24 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
 
 	plist_del(node, head);
 
+	/*
+	 * After plist_del(), iter is the replacement of the node.  If the node
+	 * was on prio_list, take shortcut to find node_next instead of looping.
+	 */
+	if (!list_empty(&iter->prio_list)) {
+		iter = list_entry(iter->prio_list.next, struct plist_node,
+				  prio_list);
+		node_next = &iter->node_list;
+		goto queue;
+	}
+
 	plist_for_each_continue(iter, head) {
 		if (node->prio != iter->prio) {
 			node_next = &iter->node_list;
 			break;
 		}
 	}
+queue:
 	list_add_tail(&node->node_list, node_next);
 
 	plist_check_head(head);

lib/rbtree_test.c
@ -14,6 +14,7 @@
 __param(int, nnodes, 100, "Number of nodes in the rb-tree");
 __param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
 __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
 
 struct test_node {
 	u32 key;
@ -239,19 +240,14 @@ static void check_augmented(int nr_nodes)
 	}
 }
 
-static int __init rbtree_test_init(void)
+static int basic_check(void)
 {
 	int i, j;
 	cycles_t time1, time2, time;
 	struct rb_node *node;
 
-	nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
-	if (!nodes)
-		return -ENOMEM;
-
 	printk(KERN_ALERT "rbtree testing");
 
-	prandom_seed_state(&rnd, 3141592653589793238ULL);
 	init();
 
 	time1 = get_cycles();
@ -343,6 +339,14 @@ static int __init rbtree_test_init(void)
 		check(0);
 	}
 
+	return 0;
+}
+
+static int augmented_check(void)
+{
+	int i, j;
+	cycles_t time1, time2, time;
+
 	printk(KERN_ALERT "augmented rbtree testing");
 
 	init();
@ -390,6 +394,20 @@ static int __init rbtree_test_init(void)
 		check_augmented(0);
 	}
 
+	return 0;
+}
+
+static int __init rbtree_test_init(void)
+{
+	nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	prandom_seed_state(&rnd, seed);
+
+	basic_check();
+	augmented_check();
+
 	kfree(nodes);
 
 	return -EAGAIN; /* Fail will directly unload the module */

lib/zlib_deflate/deflate.c
@ -151,9 +151,6 @@ static const config configuration_table[10] = {
  * meaning.
  */
 
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
 /* ===========================================================================
  * Update a hash value with the given input byte
  * IN  assertion: all calls to UPDATE_HASH are made with consecutive
@ -713,8 +710,7 @@ static void check_match(
 )
 {
     /* check that the match is indeed a match */
-    if (memcmp((char *)s->window + match,
-                (char *)s->window + start, length) != EQUAL) {
+    if (memcmp((char *)s->window + match, (char *)s->window + start, length)) {
         fprintf(stderr, " start %u, match %u, length %d\n",
 		start, match, length);
         do {

samples/Kconfig
@ -300,6 +300,15 @@ config SAMPLE_CHECK_EXEC
 	  demonstrate how they should be used with execveat(2) +
 	  AT_EXECVE_CHECK.
 
+config SAMPLE_HUNG_TASK
+	tristate "Hung task detector test code"
+	depends on DETECT_HUNG_TASK && DEBUG_FS
+	help
+	  Build a module which provide a simple debugfs file. If user reads
+	  the file, it will sleep long time (256 seconds) with holding a
+	  mutex. Thus if there are 2 or more processes read this file, it
+	  will be detected by the hung_task watchdog.
+
 source "samples/rust/Kconfig"
 
 source "samples/damon/Kconfig"

samples/Makefile
@ -42,3 +42,4 @@ obj-$(CONFIG_SAMPLE_FPROBE)		+= fprobe/
 obj-$(CONFIG_SAMPLES_RUST)		+= rust/
 obj-$(CONFIG_SAMPLE_DAMON_WSSE)		+= damon/
 obj-$(CONFIG_SAMPLE_DAMON_PRCL)		+= damon/
+obj-$(CONFIG_SAMPLE_HUNG_TASK)		+= hung_task/

samples/hung_task/Makefile | 2 (new file)
@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SAMPLE_HUNG_TASK) += hung_task_mutex.o

samples/hung_task/hung_task_mutex.c | 66 (new file)
@ -0,0 +1,66 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * hung_task_mutex.c - Sample code which causes hung task by mutex
 *
 * Usage: load this module and read `<debugfs>/hung_task/mutex`
 *        by 2 or more processes.
 *
 * This is for testing kernel hung_task error message.
 * Note that this will make your system freeze and maybe
 * cause panic. So do not use this except for the test.
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

#define HUNG_TASK_DIR   "hung_task"
#define HUNG_TASK_FILE  "mutex"
#define SLEEP_SECOND 256

static const char dummy_string[] = "This is a dummy string.";
static DEFINE_MUTEX(dummy_mutex);
static struct dentry *hung_task_dir;

static ssize_t read_dummy(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	/* If the second task waits on the lock, it is uninterruptible sleep. */
	guard(mutex)(&dummy_mutex);

	/* When the first task sleep here, it is interruptible. */
	msleep_interruptible(SLEEP_SECOND * 1000);

	return simple_read_from_buffer(user_buf, count, ppos,
				dummy_string, sizeof(dummy_string));
}

static const struct file_operations hung_task_fops = {
	.read = read_dummy,
};

static int __init hung_task_sample_init(void)
{
	hung_task_dir = debugfs_create_dir(HUNG_TASK_DIR, NULL);
	if (IS_ERR(hung_task_dir))
		return PTR_ERR(hung_task_dir);

	debugfs_create_file(HUNG_TASK_FILE, 0400, hung_task_dir,
			    NULL, &hung_task_fops);

	return 0;
}

static void __exit hung_task_sample_exit(void)
{
	debugfs_remove_recursive(hung_task_dir);
}

module_init(hung_task_sample_init);
module_exit(hung_task_sample_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Masami Hiramatsu");
MODULE_DESCRIPTION("Simple sleep under mutex file for testing hung task");
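To actually trip the detector with this sample, two tasks must block on the file at the same time. A userspace sketch of such a trigger -- assumptions: the module is loaded, debugfs is mounted at /sys/kernel/debug, and you run it in a disposable VM, since hanging tasks is the whole point:

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* assumed debugfs mount point; matches <debugfs>/hung_task/mutex */
	const char *path = "/sys/kernel/debug/hung_task/mutex";
	char buf[64];
	pid_t pid = fork();	/* parent and child both enter read_dummy() */
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* one reader sleeps interruptibly under dummy_mutex; the other waits
	 * on the mutex in uninterruptible sleep, which after ~120s is what
	 * the hung_task watchdog reports */
	read(fd, buf, sizeof(buf));
	close(fd);
	if (pid > 0)
		wait(NULL);
	return 0;
}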
scripts/checkpatch.pl
@ -113,7 +113,8 @@ Options:
   --max-line-length=n        set the maximum line length, (default $max_line_length)
                              if exceeded, warn on patches
                              requires --strict for use with --file
-  --min-conf-desc-length=n   set the min description length, if shorter, warn
+  --min-conf-desc-length=n   set the minimum description length for config symbols
+                             in lines, if shorter, warn (default $min_conf_desc_length)
   --tab-size=n               set the number of spaces for tab (default $tabsize)
   --root=PATH                PATH to the kernel tree root
   --no-summary               suppress the per-file summary
@ -3645,7 +3646,7 @@ sub process {
 			    $help_length < $min_conf_desc_length) {
 				my $stat_real = get_stat_real($linenr, $ln - 1);
 				WARN("CONFIG_DESCRIPTION",
-				     "please write a help paragraph that fully describes the config symbol\n" . "$here\n$stat_real\n");
+				     "please write a help paragraph that fully describes the config symbol with at least $min_conf_desc_length lines\n" . "$here\n$stat_real\n");
 			}
 		}
 

scripts/coccinelle/misc/secs_to_jiffies.cocci
@ -20,3 +20,13 @@ virtual patch

- msecs_to_jiffies(C * MSEC_PER_SEC)
+ secs_to_jiffies(C)

@depends on patch@ expression E; @@

- msecs_to_jiffies(E * 1000)
+ secs_to_jiffies(E)

@depends on patch@ expression E; @@

- msecs_to_jiffies(E * MSEC_PER_SEC)
+ secs_to_jiffies(E)
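The arithmetic behind the conversion is easy to check in userspace; a tiny self-contained model of the two helpers (the HZ value is assumed, and the real kernel versions also handle rounding and overflow, which this sketch ignores):

#include <assert.h>

#define HZ 250			/* assumed CONFIG_HZ value */
#define MSEC_PER_SEC 1000UL

/* simplified userspace stand-ins for the kernel helpers */
static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return ms * HZ / MSEC_PER_SEC;
}

static unsigned long secs_to_jiffies(unsigned long secs)
{
	return secs * HZ;
}

int main(void)
{
	/* the rewrite: msecs_to_jiffies(10 * MSEC_PER_SEC) -> secs_to_jiffies(10) */
	assert(msecs_to_jiffies(10 * MSEC_PER_SEC) == secs_to_jiffies(10));
	return 0;
}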

scripts/extract-fwblobs | 30 (new executable file)
@ -0,0 +1,30 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# -----------------------------------------------------------------------------
# Extracts the vmlinux built-in firmware blobs - requires a non-stripped image
# -----------------------------------------------------------------------------

if [ -z "$1" ]; then
	echo "Must provide a non-stripped vmlinux as argument"
	exit 1
fi

read -r RD_ADDR_HEX RD_OFF_HEX <<< "$( readelf -SW "$1" |\
grep -w rodata | awk '{print "0x"$5" 0x"$6}' )"

FW_SYMS="$(readelf -sW "$1" |\
awk -n '/fw_end/ { end=$2 ; print name " 0x" start " 0x" end; } { start=$2; name=$8; }')"

while IFS= read -r entry; do
	read -r FW_NAME FW_ADDR_ST_HEX FW_ADDR_END_HEX <<< "$entry"

	# Notice kernel prepends _fw_ and appends _bin to the FW name
	# in rodata; hence we hereby filter that out.
	FW_NAME=${FW_NAME:4:-4}

	FW_OFFSET="$(printf "%d" $((FW_ADDR_ST_HEX - RD_ADDR_HEX + RD_OFF_HEX)))"
	FW_SIZE="$(printf "%d" $((FW_ADDR_END_HEX - FW_ADDR_ST_HEX)))"

	dd if="$1" of="./${FW_NAME}" bs="${FW_SIZE}" count=1 iflag=skip_bytes skip="${FW_OFFSET}"
done <<< "${FW_SYMS}"

scripts/gdb/linux/cpus.py
@ -46,7 +46,7 @@ def per_cpu(var_ptr, cpu):
             # !CONFIG_SMP case
             offset = 0
     pointer = var_ptr.cast(utils.get_long_type()) + offset
-    return pointer.cast(var_ptr.type).dereference()
+    return pointer.cast(var_ptr.type)
 
 
 cpu_mask = {}
@ -149,11 +149,29 @@ Note that VAR has to be quoted as string."""
         super(PerCpu, self).__init__("lx_per_cpu")
 
     def invoke(self, var, cpu=-1):
-        return per_cpu(var.address, cpu)
+        return per_cpu(var.address, cpu).dereference()
 
 
 PerCpu()
 
 
+class PerCpuPtr(gdb.Function):
+    """Return per-cpu pointer.
+
+$lx_per_cpu_ptr("VAR"[, CPU]): Return the per-cpu pointer called VAR for the
+given CPU number. If CPU is omitted, the CPU of the current context is used.
+Note that VAR has to be quoted as string."""
+
+    def __init__(self):
+        super(PerCpuPtr, self).__init__("lx_per_cpu_ptr")
+
+    def invoke(self, var, cpu=-1):
+        return per_cpu(var, cpu)
+
+
+PerCpuPtr()
+
+
 def get_current_task(cpu):
     task_ptr_type = task_type.get_type().pointer()
 

scripts/gdb/linux/symbols.py
@ -14,7 +14,9 @@
 import gdb
 import os
 import re
+import struct
 
+from itertools import count
 from linux import modules, utils, constants
 
 
@ -53,6 +55,29 @@ if hasattr(gdb, 'Breakpoint'):
             return False
 
 
+def get_vmcore_s390():
+    with utils.qemu_phy_mem_mode():
+        vmcore_info = 0x0e0c
+        paddr_vmcoreinfo_note = gdb.parse_and_eval("*(unsigned long long *)" +
+                                                   hex(vmcore_info))
+        inferior = gdb.selected_inferior()
+        elf_note = inferior.read_memory(paddr_vmcoreinfo_note, 12)
+        n_namesz, n_descsz, n_type = struct.unpack(">III", elf_note)
+        desc_paddr = paddr_vmcoreinfo_note + len(elf_note) + n_namesz + 1
+        return gdb.parse_and_eval("(char *)" + hex(desc_paddr)).string()
+
+
+def get_kerneloffset():
+    if utils.is_target_arch('s390'):
+        try:
+            vmcore_str = get_vmcore_s390()
+        except gdb.error as e:
+            gdb.write("{}\n".format(e))
+            return None
+        return utils.parse_vmcore(vmcore_str).kerneloffset
+    return None
+
+
 class LxSymbols(gdb.Command):
     """(Re-)load symbols of Linux kernel and currently loaded modules.
 
@ -95,10 +120,14 @@ lx-symbols command."""
         except gdb.error:
             return str(module_addr)
 
-        attrs = sect_attrs['attrs']
-        section_name_to_address = {
-            attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
-            for n in range(int(sect_attrs['nsections']))}
+        section_name_to_address = {}
+        for i in count():
+            # this is a NULL terminated array
+            if sect_attrs['grp']['bin_attrs'][i] == 0x0:
+                break
+
+            attr = sect_attrs['grp']['bin_attrs'][i].dereference()
+            section_name_to_address[attr['attr']['name'].string()] = attr['private']
 
         textaddr = section_name_to_address.get(".text", module_addr)
         args = []
@ -155,7 +184,12 @@ lx-symbols command."""
                 obj.filename.endswith('vmlinux.debug')):
                 orig_vmlinux = obj.filename
         gdb.execute("symbol-file", to_string=True)
-        gdb.execute("symbol-file {0}".format(orig_vmlinux))
+        kerneloffset = get_kerneloffset()
+        if kerneloffset is None:
+            offset_arg = ""
+        else:
+            offset_arg = " -o " + hex(kerneloffset)
+        gdb.execute("symbol-file {0}{1}".format(orig_vmlinux, offset_arg))
 
         self.loaded_modules = []
         module_list = modules.module_list()

scripts/gdb/linux/utils.py
@ -11,6 +11,11 @@
 # This work is licensed under the terms of the GNU GPL version 2.
 #
 
+import contextlib
+import dataclasses
+import re
+import typing
+
 import gdb
 
 
@ -216,3 +221,33 @@ def gdb_eval_or_none(expresssion):
         return gdb.parse_and_eval(expresssion)
     except gdb.error:
         return None
+
+
+@contextlib.contextmanager
+def qemu_phy_mem_mode():
+    connection = gdb.selected_inferior().connection
+    orig = connection.send_packet("qqemu.PhyMemMode")
+    if orig not in b"01":
+        raise gdb.error("Unexpected qemu.PhyMemMode")
+    orig = orig.decode()
+    if connection.send_packet("Qqemu.PhyMemMode:1") != b"OK":
+        raise gdb.error("Failed to set qemu.PhyMemMode")
+    try:
+        yield
+    finally:
+        if connection.send_packet("Qqemu.PhyMemMode:" + orig) != b"OK":
+            raise gdb.error("Failed to restore qemu.PhyMemMode")
+
+
+@dataclasses.dataclass
+class VmCore:
+    kerneloffset: typing.Optional[int]
+
+
+def parse_vmcore(s):
+    match = re.search(r"KERNELOFFSET=([0-9a-f]+)", s)
+    if match is None:
+        kerneloffset = None
+    else:
+        kerneloffset = int(match.group(1), 16)
+    return VmCore(kerneloffset=kerneloffset)

scripts/get_maintainer.pl
@ -50,6 +50,7 @@ my $output_multiline = 1;
 my $output_separator = ", ";
 my $output_roles = 0;
 my $output_rolestats = 1;
+my $output_substatus = undef;
 my $output_section_maxlen = 50;
 my $scm = 0;
 my $tree = 1;
@ -269,6 +270,7 @@ if (!GetOptions(
 		'separator=s' => \$output_separator,
 		'subsystem!' => \$subsystem,
 		'status!' => \$status,
+		'substatus!' => \$output_substatus,
 		'scm!' => \$scm,
 		'tree!' => \$tree,
 		'web!' => \$web,
@ -314,6 +316,10 @@ $output_multiline = 0 if ($output_separator ne ", ");
 $output_rolestats = 1 if ($interactive);
 $output_roles = 1 if ($output_rolestats);
 
+if (!defined $output_substatus) {
+    $output_substatus = $email && $output_roles && -t STDOUT;
+}
+
 if ($sections || $letters ne "") {
     $sections = 1;
     $email = 0;
@ -637,6 +643,7 @@ my @web = ();
 my @bug = ();
 my @subsystem = ();
 my @status = ();
+my @substatus = ();
 my %deduplicate_name_hash = ();
 my %deduplicate_address_hash = ();
 
@ -651,6 +658,11 @@ if ($scm) {
     output(@scm);
 }
 
+if ($output_substatus) {
+    @substatus = uniq(@substatus);
+    output(@substatus);
+}
+
 if ($status) {
     @status = uniq(@status);
     output(@status);
@ -859,6 +871,7 @@ sub get_maintainers {
     @bug = ();
     @subsystem = ();
     @status = ();
+    @substatus = ();
     %deduplicate_name_hash = ();
     %deduplicate_address_hash = ();
     if ($email_git_all_signature_types) {
@ -1071,8 +1084,9 @@ MAINTAINER field selection options:
     --moderated => include moderated lists(s) if any (default: true)
     --s => include subscriber only list(s) if any (default: false)
     --remove-duplicates => minimize duplicate email names/addresses
-    --roles => show roles (status:subsystem, git-signer, list, etc...)
+    --roles => show roles (role:subsystem, git-signer, list, etc...)
     --rolestats => show roles and statistics (commits/total_commits, %)
+    --substatus => show subsystem status if not Maintained (default: match --roles when output is tty)"
     --file-emails => add email addresses found in -f file (default: 0 (off))
     --fixes => for patches, add signatures of commits with 'Fixes: <commit>' (default: 1 (on))
   --scm => print SCM tree(s) if any
@ -1284,8 +1298,9 @@ sub get_maintainer_role {
     my $start = find_starting_index($index);
     my $end = find_ending_index($index);
 
-    my $role = "unknown";
+    my $role = "maintainer";
     my $subsystem = get_subsystem_name($index);
+    my $status = "unknown";
 
     for ($i = $start + 1; $i < $end; $i++) {
 	my $tv = $typevalue[$i];
@ -1293,23 +1308,13 @@ sub get_maintainer_role {
 	    my $ptype = $1;
 	    my $pvalue = $2;
 	    if ($ptype eq "S") {
-		$role = $pvalue;
+		$status = $pvalue;
 	    }
 	}
     }
 
-    $role = lc($role);
-    if      ($role eq "supported") {
-	$role = "supporter";
-    } elsif ($role eq "maintained") {
-	$role = "maintainer";
-    } elsif ($role eq "odd fixes") {
-	$role = "odd fixer";
-    } elsif ($role eq "orphan") {
-	$role = "orphan minder";
-    } elsif ($role eq "obsolete") {
-	$role = "obsolete minder";
-    } elsif ($role eq "buried alive in reporters") {
+    $status = lc($status);
+    if ($status eq "buried alive in reporters") {
 	$role = "chief penguin";
     }
 
@ -1335,7 +1340,9 @@ sub add_categories {
     my $start = find_starting_index($index);
     my $end = find_ending_index($index);
 
-    push(@subsystem, $typevalue[$start]);
+    my $subsystem = $typevalue[$start];
+    push(@subsystem, $subsystem);
+    my $status = "Unknown";
 
     for ($i = $start + 1; $i < $end; $i++) {
 	my $tv = $typevalue[$i];
@ -1386,8 +1393,8 @@ sub add_categories {
 		}
 	    } elsif ($ptype eq "R") {
 		if ($email_reviewer) {
-		    my $subsystem = get_subsystem_name($i);
-		    push_email_addresses($pvalue, "reviewer:$subsystem" . $suffix);
+		    my $subs = get_subsystem_name($i);
+		    push_email_addresses($pvalue, "reviewer:$subs" . $suffix);
 		}
 	    } elsif ($ptype eq "T") {
 		push(@scm, $pvalue . $suffix);
@ -1397,9 +1404,14 @@ sub add_categories {
 		push(@bug, $pvalue . $suffix);
 	    } elsif ($ptype eq "S") {
 		push(@status, $pvalue . $suffix);
+		$status = $pvalue;
 	    }
 	}
     }
+
+    if ($subsystem ne "THE REST" and $status ne "Maintained") {
+	push(@substatus, $subsystem . " status: " . $status . $suffix)
+    }
 }
 
 sub email_inuse {
@ -1903,6 +1915,7 @@ EOT
 		$done = 1;
 		$output_rolestats = 0;
 		$output_roles = 0;
+		$output_substatus = 0;
 		last;
 	    } elsif ($nr =~ /^\d+$/ && $nr > 0 && $nr <= $count) {
 		$selected{$nr - 1} = !$selected{$nr - 1};

sound/pci/ac97/ac97_codec.c
@ -2461,8 +2461,7 @@ int snd_ac97_update_power(struct snd_ac97 *ac97, int reg, int powerup)
 		 * (for avoiding loud click noises for many (OSS) apps
 		 *  that open/close frequently)
 		 */
-		schedule_delayed_work(&ac97->power_work,
-				      msecs_to_jiffies(power_save * 1000));
+		schedule_delayed_work(&ac97->power_work, secs_to_jiffies(power_save));
 	else {
 		cancel_delayed_work(&ac97->power_work);
 		update_power_regs(ac97);

tools/include/asm/timex.h | 13 (new file)
@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TOOLS_LINUX_ASM_TIMEX_H
#define __TOOLS_LINUX_ASM_TIMEX_H

#include <time.h>

#define cycles_t clock_t

static inline cycles_t get_cycles(void)
{
	return clock();
}
#endif // __TOOLS_LINUX_ASM_TIMEX_H
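Since this userspace shim defines cycles_t as clock_t, the "cycle" counts printed by the ported tests are really CPU-time ticks from clock(). A standalone sketch of how the perf loops consume the helper (duplicating the two definitions above so it compiles outside the tree):

#include <stdio.h>
#include <time.h>

#define cycles_t clock_t

static inline cycles_t get_cycles(void)
{
	return clock();
}

int main(void)
{
	volatile unsigned long sink = 0;
	cycles_t time1 = get_cycles();

	/* stand-in for a timed test body */
	for (unsigned long i = 0; i < 10000000; i++)
		sink += i;

	cycles_t time2 = get_cycles();

	printf("elapsed: %ld clock ticks (sink=%lu)\n",
	       (long)(time2 - time1), sink);
	return 0;
}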

tools/include/linux/bitmap.h
@ -19,6 +19,7 @@ bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, unsigned int bits);
 bool __bitmap_equal(const unsigned long *bitmap1,
 		    const unsigned long *bitmap2, unsigned int bits);
+void __bitmap_set(unsigned long *map, unsigned int start, int len);
 void __bitmap_clear(unsigned long *map, unsigned int start, int len);
 bool __bitmap_intersects(const unsigned long *bitmap1,
 			 const unsigned long *bitmap2, unsigned int bits);
@ -79,6 +80,11 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
 		__bitmap_or(dst, src1, src2, nbits);
 }
 
+static inline unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags __maybe_unused)
+{
+	return malloc(bitmap_size(nbits));
+}
+
 /**
  * bitmap_zalloc - Allocate bitmap
  * @nbits: Number of bits
@ -150,6 +156,21 @@ static inline bool bitmap_intersects(const unsigned long *src1,
 		return __bitmap_intersects(src1, src2, nbits);
 }
 
+static inline void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
+{
+	if (__builtin_constant_p(nbits) && nbits == 1)
+		__set_bit(start, map);
+	else if (small_const_nbits(start + nbits))
+		*map |= GENMASK(start + nbits - 1, start);
+	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+		memset((char *)map + start / 8, 0xff, nbits / 8);
+	else
+		__bitmap_set(map, start, nbits);
+}
+
 static inline void bitmap_clear(unsigned long *map, unsigned int start,
 			       unsigned int nbits)
 {

tools/include/linux/container_of.h | 18 (new file)
@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_CONTAINER_OF_H
#define _TOOLS_LINUX_CONTAINER_OF_H

#ifndef container_of
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 */
#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) * __mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })
#endif

#endif	/* _TOOLS_LINUX_CONTAINER_OF_H */
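Usage is unchanged by the move to its own header; for illustration, recovering an enclosing structure from a pointer to an embedded member (standalone sketch reusing the same macro, which relies on the GCC/Clang typeof and statement-expression extensions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) * __mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })

struct item {
	int key;
	struct { int dummy; } node;	/* stands in for an embedded list/tree node */
};

int main(void)
{
	struct item it = { .key = 42 };
	/* given only &it.node, recover the enclosing struct item */
	struct item *back = container_of(&it.node, struct item, node);

	printf("key = %d\n", back->key);	/* prints 42 */
	return 0;
}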

tools/include/linux/kernel.h
@ -11,6 +11,7 @@
 #include <linux/panic.h>
 #include <endian.h>
 #include <byteswap.h>
+#include <linux/container_of.h>
 
 #ifndef UINT_MAX
 #define UINT_MAX	(~0U)
@ -25,19 +26,6 @@
 #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
 #endif
 
-#ifndef container_of
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr:	the pointer to the member.
- * @type:	the type of the container struct this is embedded in.
- * @member:	the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({			\
-	const typeof(((type *)0)->member) * __mptr = (ptr);	\
-	(type *)((char *)__mptr - offsetof(type, member)); })
-#endif
-
 #ifndef max
 #define max(x, y) ({				\
 	typeof(x) _max1 = (x);			\

tools/include/linux/math64.h
@ -72,4 +72,9 @@ static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
 }
 #endif
 
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+	return dividend / divisor;
+}
+
 #endif /* _LINUX_MATH64_H */

tools/include/linux/moduleparam.h | 7 (new file)
@ -0,0 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_MODULE_PARAMS_H
#define _TOOLS_LINUX_MODULE_PARAMS_H

#define MODULE_PARM_DESC(parm, desc)

#endif // _TOOLS_LINUX_MODULE_PARAMS_H

tools/include/linux/prandom.h | 51 (new file)
@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TOOLS_LINUX_PRANDOM_H
#define __TOOLS_LINUX_PRANDOM_H

#include <linux/types.h>

struct rnd_state {
	__u32 s1, s2, s3, s4;
};

/*
 * Handle minimum values for seeds
 */
static inline u32 __seed(u32 x, u32 m)
{
	return (x < m) ? x + m : x;
}

/**
 * prandom_seed_state - set seed for prandom_u32_state().
 * @state: pointer to state structure to receive the seed.
 * @seed: arbitrary 64-bit value to use as a seed.
 */
static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
{
	u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;

	state->s1 = __seed(i,   2U);
	state->s2 = __seed(i,   8U);
	state->s3 = __seed(i,  16U);
	state->s4 = __seed(i, 128U);
}

/**
 *	prandom_u32_state - seeded pseudo-random number generator.
 *	@state: pointer to state structure holding seeded state.
 *
 *	This is used for pseudo-randomness with no outside seeding.
 *	For more random results, use get_random_u32().
 */
static inline u32 prandom_u32_state(struct rnd_state *state)
{
#define TAUSWORTHE(s, a, b, c, d) (((s & c) << d) ^ (((s << a) ^ s) >> b))
	state->s1 = TAUSWORTHE(state->s1,  6U, 13U, 4294967294U, 18U);
	state->s2 = TAUSWORTHE(state->s2,  2U, 27U, 4294967288U,  2U);
	state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U,  7U);
	state->s4 = TAUSWORTHE(state->s4,  3U, 12U, 4294967168U, 13U);

	return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
}
#endif // __TOOLS_LINUX_PRANDOM_H
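The point of copying this generator into tools/ is determinism: the same seed must produce the same stream in the userspace tests as in the kernel ones. A small sketch, assuming it is built with -I tools/include so that <linux/prandom.h> and its <linux/types.h> dependency resolve:

/* build: cc -I tools/include demo.c (include-path layout is an assumption) */
#include <stdio.h>
#include <linux/prandom.h>

int main(void)
{
	struct rnd_state a, b;

	prandom_seed_state(&a, 3141592653589793238ULL);
	prandom_seed_state(&b, 3141592653589793238ULL);

	/* identical seeds must give identical streams */
	for (int i = 0; i < 3; i++) {
		u32 x = prandom_u32_state(&a);
		u32 y = prandom_u32_state(&b);

		printf("%u %u %s\n", x, y, x == y ? "ok" : "MISMATCH");
	}
	return 0;
}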

tools/include/linux/slab.h
@ -12,6 +12,7 @@
 
 void *kmalloc(size_t size, gfp_t gfp);
 void kfree(void *p);
+void *kmalloc_array(size_t n, size_t size, gfp_t gfp);
 
 bool slab_is_available(void);
 

tools/include/linux/types.h
@ -42,6 +42,8 @@ typedef __s16 s16;
 typedef __u8  u8;
 typedef __s8  s8;
 
+typedef unsigned long long	ullong;
+
 #ifdef __CHECKER__
 #define __bitwise	__attribute__((bitwise))
 #else

tools/lib/bitmap.c
@ -101,6 +101,26 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
 	return false;
 }
 
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
 void __bitmap_clear(unsigned long *map, unsigned int start, int len)
 {
 	unsigned long *p = map + BIT_WORD(start);

tools/testing/shared/linux.c
@ -36,3 +36,19 @@ void kfree(void *p)
 		printf("Freeing %p to malloc\n", p);
 	free(p);
 }
+
+void *kmalloc_array(size_t n, size_t size, gfp_t gfp)
+{
+	void *ret;
+
+	if (!(gfp & __GFP_DIRECT_RECLAIM))
+		return NULL;
+
+	ret = calloc(n, size);
+	uatomic_inc(&kmalloc_nr_allocated);
+	if (kmalloc_verbose)
+		printf("Allocating %p from calloc\n", ret);
+	if (gfp & __GFP_ZERO)
+		memset(ret, 0, n * size);
+	return ret;
+}

tools/testing/rbtree/Makefile | 33 (new file)
@ -0,0 +1,33 @@
# SPDX-License-Identifier: GPL-2.0

.PHONY: clean

TARGETS = rbtree_test interval_tree_test
OFILES = $(SHARED_OFILES) rbtree-shim.o interval_tree-shim.o maple-shim.o
DEPS = ../../../include/linux/rbtree.h \
	../../../include/linux/rbtree_types.h \
	../../../include/linux/rbtree_augmented.h \
	../../../include/linux/interval_tree.h \
	../../../include/linux/interval_tree_generic.h \
	../../../lib/rbtree.c \
	../../../lib/interval_tree.c

targets: $(TARGETS)

include ../shared/shared.mk

ifeq ($(DEBUG), 1)
	CFLAGS += -g
endif

$(TARGETS):	$(OFILES)

rbtree-shim.o: $(DEPS)
rbtree_test.o:  ../../../lib/rbtree_test.c
interval_tree-shim.o: $(DEPS)
interval_tree-shim.o: CFLAGS += -DCONFIG_INTERVAL_TREE_SPAN_ITER
interval_tree_test.o: 	../../../lib/interval_tree_test.c
interval_tree_test.o: CFLAGS += -DCONFIG_INTERVAL_TREE_SPAN_ITER

clean:
	$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/*

tools/testing/rbtree/interval_tree_test.c | 58 (new file)
@ -0,0 +1,58 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * interval_tree.c: Userspace Interval Tree test-suite
 * Copyright (c) 2025 Wei Yang <richard.weiyang@gmail.com>
 */
#include <linux/math64.h>
#include <linux/kern_levels.h>
#include "shared.h"
#include "maple-shared.h"

#include "../../../lib/interval_tree_test.c"

int usage(void)
{
	fprintf(stderr, "Userland interval tree test cases\n");
	fprintf(stderr, "  -n: Number of nodes in the interval tree\n");
	fprintf(stderr, "  -p: Number of iterations modifying the tree\n");
	fprintf(stderr, "  -q: Number of searches to the interval tree\n");
	fprintf(stderr, "  -s: Number of iterations searching the tree\n");
	fprintf(stderr, "  -a: Searches will iterate all nodes in the tree\n");
	fprintf(stderr, "  -m: Largest value for the interval's endpoint\n");
	fprintf(stderr, "  -r: Random seed\n");
	exit(-1);
}

void interval_tree_tests(void)
{
	interval_tree_test_init();
	interval_tree_test_exit();
}

int main(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "n:p:q:s:am:r:")) != -1) {
		if (opt == 'n')
			nnodes = strtoul(optarg, NULL, 0);
		else if (opt == 'p')
			perf_loops = strtoul(optarg, NULL, 0);
		else if (opt == 'q')
			nsearches = strtoul(optarg, NULL, 0);
		else if (opt == 's')
			search_loops = strtoul(optarg, NULL, 0);
		else if (opt == 'a')
			search_all = true;
		else if (opt == 'm')
			max_endpoint = strtoul(optarg, NULL, 0);
		else if (opt == 'r')
			seed = strtoul(optarg, NULL, 0);
		else
			usage();
	}

	maple_tree_init();
	interval_tree_tests();
	return 0;
}

tools/testing/rbtree/rbtree_test.c | 48 (new file)
@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * rbtree_test.c: Userspace Red Black Tree test-suite
 * Copyright (c) 2025 Wei Yang <richard.weiyang@gmail.com>
 */
#include <linux/init.h>
#include <linux/math64.h>
#include <linux/kern_levels.h>
#include "shared.h"

#include "../../../lib/rbtree_test.c"

int usage(void)
{
	fprintf(stderr, "Userland rbtree test cases\n");
	fprintf(stderr, "  -n: Number of nodes in the rb-tree\n");
	fprintf(stderr, "  -p: Number of iterations modifying the rb-tree\n");
	fprintf(stderr, "  -c: Number of iterations modifying and verifying the rb-tree\n");
	fprintf(stderr, "  -r: Random seed\n");
	exit(-1);
}

void rbtree_tests(void)
{
	rbtree_test_init();
	rbtree_test_exit();
}

int main(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "n:p:c:r:")) != -1) {
		if (opt == 'n')
			nnodes = strtoul(optarg, NULL, 0);
		else if (opt == 'p')
			perf_loops = strtoul(optarg, NULL, 0);
		else if (opt == 'c')
			check_loops = strtoul(optarg, NULL, 0);
		else if (opt == 'r')
			seed = strtoul(optarg, NULL, 0);
		else
			usage();
	}

	rbtree_tests();
	return 0;
}

tools/testing/rbtree/test.h | 4 (new file)
@ -0,0 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */

void rbtree_tests(void);
void interval_tree_tests(void);

Some files were not shown because too many files have changed in this diff.