* Make SGX less likely to induce fatal machine checks

* Use much more compact SHA-256 library API

Merge tag 'x86_sgx_for_6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull Intel Software Guard eXtensions (SGX) updates from Dave Hansen:
 "A couple of x86/sgx changes.

  The first one is a no-brainer: use the (simple) SHA-256 library.

  For the second one, some folks doing testing noticed that SGX systems
  under memory pressure were inducing fatal machine checks at pretty
  unnerving rates, despite the SGX code having _some_ awareness of
  memory poison.

  It turns out that the SGX reclaim path was not checking for poison,
  _and_ it unconditionally accesses page contents to copy them around.
  Make sure that poisoned pages are not reclaimed"
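
The fix is conceptually simple, and a minimal sketch makes the "why" clear.
The sketch below is illustrative only, not the literal upstream patch: it
assumes the EPC page descriptor carries a poison flag (struct sgx_epc_page
has tracked poison since the original SGX memory-failure work), and the
helper name is made up for the example.

	/*
	 * Illustrative sketch, not the exact patch.  Reclaim copies page
	 * contents out of the EPC, and reading a poisoned page consumes
	 * the poison and raises a machine check, so poisoned pages must
	 * be filtered out before any copy happens.
	 */
	static bool sgx_page_ok_to_reclaim(struct sgx_epc_page *epc_page)
	{
		if (epc_page->poison)
			return false;	/* never read/copy a poisoned page */

		return true;
	}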

* tag 'x86_sgx_for_6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sgx: Prevent attempts to reclaim poisoned pages
  x86/sgx: Use SHA-256 library API instead of crypto_shash API
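
The SHA-256 conversion is fully visible in the ioctl.c hunk below, but the
contrast is worth spelling out. As a hedged, standalone sketch (the function
example_mrsigner() is an illustrative name, not a kernel symbol): the library
API from <crypto/sha2.h> is a single infallible call, so the
crypto_alloc_shash()/IS_ERR()/crypto_free_shash() plumbing simply disappears.

	#include <crypto/sha2.h>	/* void sha256(const u8 *data, size_t len, u8 *out); */

	/*
	 * Illustrative only: derive the MRSIGNER hash of an SGX signing-key
	 * modulus in one call.  SGX_MODULUS_SIZE is the 384-byte (3072-bit)
	 * RSA modulus size used by the SGX driver.
	 */
	static void example_mrsigner(const void *modulus,
				     u8 out[SHA256_DIGEST_SIZE])
	{
		sha256(modulus, SGX_MODULUS_SIZE, out);	/* cannot fail */
	}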
commit bbd9c366bf (Linus Torvalds, 2025-05-29 21:13:17 -07:00)
4 changed files with 5 additions and 31 deletions

arch/x86/Kconfig

@@ -1858,8 +1858,7 @@ endchoice
 config X86_SGX
 	bool "Software Guard eXtensions (SGX)"
 	depends on X86_64 && CPU_SUP_INTEL && X86_X2APIC
-	depends on CRYPTO=y
-	depends on CRYPTO_SHA256=y
+	select CRYPTO_LIB_SHA256
 	select MMU_NOTIFIER
 	select NUMA_KEEP_MEMINFO if NUMA
 	select XARRAY_MULTI
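
(A note on the hunk above: the two "depends on CRYPTO*" lines can go away
because CRYPTO_LIB_SHA256 is a small, self-contained helper in lib/crypto/
that any option can select directly, with no dependency on the full
crypto_shash infrastructure.)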

arch/x86/kernel/cpu/sgx/driver.h

@@ -2,7 +2,6 @@
 #ifndef __ARCH_SGX_DRIVER_H__
 #define __ARCH_SGX_DRIVER_H__
 
-#include <crypto/hash.h>
 #include <linux/kref.h>
 #include <linux/mmu_notifier.h>
 #include <linux/radix-tree.h>

arch/x86/kernel/cpu/sgx/ioctl.c

@@ -3,6 +3,7 @@
 
 #include <asm/mman.h>
 #include <asm/sgx.h>
+#include <crypto/sha2.h>
 #include <linux/mman.h>
 #include <linux/delay.h>
 #include <linux/file.h>
@@ -463,31 +464,6 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
 	return ret;
 }
 
-static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
-			      void *hash)
-{
-	SHASH_DESC_ON_STACK(shash, tfm);
-
-	shash->tfm = tfm;
-
-	return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
-}
-
-static int sgx_get_key_hash(const void *modulus, void *hash)
-{
-	struct crypto_shash *tfm;
-	int ret;
-
-	tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	ret = __sgx_get_key_hash(tfm, modulus, hash);
-
-	crypto_free_shash(tfm);
-	return ret;
-}
-
 static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
 			 void *token)
 {
@@ -523,9 +499,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
 	if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
 	    sgx_xfrm_reserved_mask)
 		return -EINVAL;
 
-	ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
-	if (ret)
-		return ret;
+	sha256(sigstruct->modulus, SGX_MODULUS_SIZE, (u8 *)mrsigner);
 
 	mutex_lock(&encl->lock);

arch/x86/kernel/cpu/sgx/main.c

@@ -720,6 +720,8 @@ int arch_memory_failure(unsigned long pfn, int flags)
 		goto out;
 	}
 
+	sgx_unmark_page_reclaimable(page);
+
 	/*
 	 * TBD: Add additional plumbing to enable pre-emptive
 	 * action for asynchronous poison notification. Until