x86/mce: Fix siginfo_t->si_addr value for non-recoverable memory faults

In commit dad1743e59 ("x86/mce: Only restart instruction after machine
check recovery if it is safe") we fixed mce_notify_process() to force a
signal to the current process if it was not restartable (RIPV bit not
set in MCG_STATUS). But doing it here means that the process doesn't
get told the virtual address of the fault via siginfo_t->si_addr. This
would prevent application level recovery from the fault.

Make a new MF_MUST_KILL flag bit for memory_failure() et al. to use so
that we will provide the right information with the signal.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Acked-by: Borislav Petkov <borislav.petkov@amd.com>
Cc: stable@kernel.org    # 3.4+

parent 6887a4131d
commit 6751ed65dc

3 changed files with 13 additions and 8 deletions

arch/x86/kernel/cpu/mcheck/mce.c:

@@ -1186,6 +1186,7 @@ void mce_notify_process(void)
 {
 	unsigned long pfn;
 	struct mce_info *mi = mce_find_info();
+	int flags = MF_ACTION_REQUIRED;
 
 	if (!mi)
 		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
@@ -1200,8 +1201,9 @@ void mce_notify_process(void)
 	 * doomed. We still need to mark the page as poisoned and alert any
 	 * other users of the page.
 	 */
-	if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
-			   mi->restartable == 0) {
+	if (!mi->restartable)
+		flags |= MF_MUST_KILL;
+	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
 		pr_err("Memory error not recovered");
 		force_sig(SIGBUS, current);
 	}

include/linux/mm.h:

@@ -1591,6 +1591,7 @@ void vmemmap_populate_print_last(void);
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
 	MF_ACTION_REQUIRED = 1 << 1,
+	MF_MUST_KILL = 1 << 2,
 };
 extern int memory_failure(unsigned long pfn, int trapno, int flags);
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);

mm/memory-failure.c:

@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int doit, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 			  int fail, struct page *page, unsigned long pfn,
 			  int flags)
 {
 	struct to_kill *tk, *next;
 
 	list_for_each_entry_safe (tk, next, to_kill, nd) {
-		if (doit) {
+		if (forcekill) {
 			/*
 			 * In case something went wrong with munmapping
 			 * make sure the process doesn't catch the
@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
 	int ret;
-	int kill = 1;
+	int kill = 1, forcekill;
 	struct page *hpage = compound_head(p);
 	struct page *ppage;
 
@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
 	mapping = page_mapping(hpage);
-	if (!PageDirty(hpage) && mapping &&
+	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
 	    mapping_cap_writeback_dirty(mapping)) {
 		if (page_mkclean(hpage)) {
 			SetPageDirty(hpage);
@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Now that the dirty bit has been propagated to the
 	 * struct page and all unmaps done we can decide if
 	 * killing is needed or not.  Only kill when the page
-	 * was dirty, otherwise the tokill list is merely
+	 * was dirty or the process is not restartable,
+	 * otherwise the tokill list is merely
 	 * freed.  When there was a problem unmapping earlier
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs(&tokill, !!PageDirty(ppage), trapno,
+	forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
+	kill_procs(&tokill, forcekill, trapno,
 		      ret != SWAP_SUCCESS, p, pfn, flags);
 
 	return ret;
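
For context (not part of the commit): the application-level recovery that the commit message says was being prevented relies on the process registering a SIGBUS handler with SA_SIGINFO and reading the faulting virtual address from siginfo_t->si_addr, with si_code set to BUS_MCEERR_AR for an action-required machine check. Below is a minimal, hypothetical sketch of such a consumer; the handler body and program structure are illustrative assumptions, not code from this change.

#define _XOPEN_SOURCE 700
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical handler: report the poisoned address delivered by the kernel. */
static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig;
	(void)ucontext;

	/*
	 * si_addr is the virtual address inside the poisoned page; with this
	 * fix it is filled in even when the machine check was not restartable.
	 * (fprintf is not async-signal-safe; acceptable only for a demo.)
	 */
#ifdef BUS_MCEERR_AR
	if (si->si_code == BUS_MCEERR_AR) {
		fprintf(stderr, "action-required memory error at %p\n", si->si_addr);
		/*
		 * A real application would discard or rebuild whatever data is
		 * backed by that page (e.g. re-read it from disk) and continue
		 * instead of exiting.
		 */
		_exit(EXIT_FAILURE);
	}
#endif
	fprintf(stderr, "SIGBUS at %p (si_code %d)\n", si->si_addr, si->si_code);
	_exit(EXIT_FAILURE);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction(SIGBUS, &sa, NULL) != 0) {
		perror("sigaction");
		return EXIT_FAILURE;
	}

	pause();	/* stand-in for the real workload */
	return 0;
}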