ext4 crypto: don't let data integrity writebacks fail with ENOMEM

We don't want the writeback triggered from the journal commit (in
data=writeback mode) to cause the journal to abort due to
generic_writepages() returning an ENOMEM error.  In addition, if
fsync() fails with ENOMEM, most applications will probably not do the
right thing.  So if we are doing a data integrity sync, and
ext4_encrypt() returns ENOMEM, we will submit any queued I/O to date,
and then retry the allocation using GFP_NOFAIL.

Google-Bug-Id: 27641567

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 9e92f48c34
commit c9af28fdd4

4 changed files with 39 additions and 20 deletions
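The heart of the patch is the retry pattern described in the commit message: attempt the encryption with GFP_NOFS, and only for a data-integrity writeback escalate to __GFP_NOFAIL after flushing whatever I/O is already queued. Below is a minimal condensed sketch of that logic, for orientation only; encrypt_page_with_retry(), try_encrypt() and flush_queued_io() are hypothetical stand-ins for ext4_bio_write_page(), ext4_encrypt() and ext4_io_submit().

#include <linux/gfp.h>
#include <linux/writeback.h>

/* Hypothetical condensed form of the retry logic this patch adds to
 * ext4_bio_write_page(); try_encrypt() and flush_queued_io() stand in
 * for ext4_encrypt() and ext4_io_submit(). */
static int encrypt_page_with_retry(struct inode *inode, struct page *page,
				   struct writeback_control *wbc)
{
	gfp_t gfp_flags = GFP_NOFS;	/* normal writeback: may fail */
	int ret;

retry:
	ret = try_encrypt(inode, page, gfp_flags);
	if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
		/* Data-integrity sync: failing here would abort the
		 * journal or break fsync(), so submit the I/O queued
		 * so far and retry with an allocation that cannot fail. */
		flush_queued_io();
		gfp_flags |= __GFP_NOFAIL;
		goto retry;
	}
	return ret;
}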
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
@@ -91,7 +91,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+					    gfp_t gfp_flags)
 {
 	struct ext4_crypto_ctx *ctx = NULL;
 	int res = 0;
@@ -118,7 +119,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
 		list_del(&ctx->free_list);
 	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
 	if (!ctx) {
-		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
 		if (!ctx) {
 			res = -ENOMEM;
 			goto out;
@@ -255,7 +256,8 @@ static int ext4_page_crypto(struct inode *inode,
 			    ext4_direction_t rw,
 			    pgoff_t index,
 			    struct page *src_page,
-			    struct page *dest_page)
+			    struct page *dest_page,
+			    gfp_t gfp_flags)
 
 {
 	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +268,7 @@ static int ext4_page_crypto(struct inode *inode,
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
-	req = skcipher_request_alloc(tfm, GFP_NOFS);
+	req = skcipher_request_alloc(tfm, gfp_flags);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
 				   "%s: crypto_request_alloc() failed\n",
@@ -307,9 +309,10 @@ static int ext4_page_crypto(struct inode *inode,
 	return 0;
 }
 
-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
+				      gfp_t gfp_flags)
 {
-	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
 	if (ctx->w.bounce_page == NULL)
 		return ERR_PTR(-ENOMEM);
 	ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +335,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
  * error value or NULL.
  */
 struct page *ext4_encrypt(struct inode *inode,
-			  struct page *plaintext_page)
+			  struct page *plaintext_page,
+			  gfp_t gfp_flags)
 {
 	struct ext4_crypto_ctx *ctx;
 	struct page *ciphertext_page = NULL;
@@ -340,17 +344,17 @@ struct page *ext4_encrypt(struct inode *inode,
 
 	BUG_ON(!PageLocked(plaintext_page));
 
-	ctx = ext4_get_crypto_ctx(inode);
+	ctx = ext4_get_crypto_ctx(inode, gfp_flags);
 	if (IS_ERR(ctx))
 		return (struct page *) ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
 	if (IS_ERR(ciphertext_page))
 		goto errout;
 	ctx->w.control_page = plaintext_page;
 	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
-			       plaintext_page, ciphertext_page);
+			       plaintext_page, ciphertext_page, gfp_flags);
 	if (err) {
 		ciphertext_page = ERR_PTR(err);
 	errout:
@@ -378,8 +382,8 @@ int ext4_decrypt(struct page *page)
 {
 	BUG_ON(!PageLocked(page));
 
-	return ext4_page_crypto(page->mapping->host,
-				EXT4_DECRYPT, page->index, page, page);
+	return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
+				page->index, page, page, GFP_NOFS);
 }
 
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -398,11 +402,11 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 
 	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
 
-	ctx = ext4_get_crypto_ctx(inode);
+	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
 	if (IS_ERR(ciphertext_page)) {
 		err = PTR_ERR(ciphertext_page);
 		goto errout;
@@ -410,11 +414,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 
 	while (len--) {
 		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
-				       ZERO_PAGE(0), ciphertext_page);
+				       ZERO_PAGE(0), ciphertext_page,
+				       GFP_NOFS);
 		if (err)
 			goto errout;
 
-		bio = bio_alloc(GFP_KERNEL, 1);
+		bio = bio_alloc(GFP_NOWAIT, 1);
 		if (!bio) {
 			err = -ENOMEM;
 			goto errout;
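A note on the allocation flags being plumbed through above, for readers outside the kernel: GFP_NOFS may sleep but will not recurse into filesystem code during reclaim (avoiding deadlock against ext4 itself), GFP_NOWAIT fails immediately rather than sleeping (tolerable in paths that can simply return an error to their caller, such as the zeroout loop), and __GFP_NOFAIL makes the allocator retry indefinitely instead of returning NULL. A toy illustration of the difference; demo_alloc() and its parameters are hypothetical:

#include <linux/slab.h>

/* Hypothetical demo: how the gfp_t argument changes failure behavior
 * at the call sites above. */
static void *demo_alloc(size_t size, bool atomic_path, bool must_succeed)
{
	gfp_t gfp = GFP_NOFS;		/* may sleep; no FS recursion */

	if (atomic_path)
		gfp = GFP_NOWAIT;	/* never sleeps; may return NULL */
	else if (must_succeed)
		gfp |= __GFP_NOFAIL;	/* loops until the allocation succeeds */

	return kmalloc(size, gfp);
}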
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
@@ -2282,11 +2282,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep;
 bool ext4_valid_contents_enc_mode(uint32_t mode);
 uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
 extern struct workqueue_struct *ext4_read_workqueue;
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+					    gfp_t gfp_flags);
 void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
 void ext4_restore_control_page(struct page *data_page);
 struct page *ext4_encrypt(struct inode *inode,
-			  struct page *plaintext_page);
+			  struct page *plaintext_page,
+			  gfp_t gfp_flags);
 int ext4_decrypt(struct page *page);
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 			   ext4_fsblk_t pblk, ext4_lblk_t len);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/backing-dev.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
 	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
 	    nr_to_submit) {
-		data_page = ext4_encrypt(inode, page);
+		gfp_t gfp_flags = GFP_NOFS;
+
+	retry_encrypt:
+		data_page = ext4_encrypt(inode, page, gfp_flags);
 		if (IS_ERR(data_page)) {
 			ret = PTR_ERR(data_page);
+			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
+				if (io->io_bio) {
+					ext4_io_submit(io);
+					congestion_wait(BLK_RW_ASYNC, HZ/50);
+				}
+				gfp_flags |= __GFP_NOFAIL;
+				goto retry_encrypt;
+			}
 			data_page = NULL;
 			goto out;
 		}
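The ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL test above is what confines the NOFAIL escalation to data-integrity writeback: fsync()- and journal-commit-driven writeback run with WB_SYNC_ALL, while background flusher writeback uses WB_SYNC_NONE and may harmlessly leave the page dirty for a later pass. A sketch of how the two paths can diverge; writeback_one_page() is hypothetical:

#include <linux/writeback.h>

/* Hypothetical: WB_SYNC_NONE writeback may give up on a transient
 * failure, WB_SYNC_ALL writeback must not lose the page. */
static int writeback_one_page(struct page *page,
			      struct writeback_control *wbc, int err)
{
	if (err == -ENOMEM && wbc->sync_mode == WB_SYNC_NONE) {
		redirty_page_for_writepage(wbc, page);	/* try again later */
		return 0;
	}
	return err;	/* integrity writeback surfaces the failure */
}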
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
 			if (ext4_encrypted_inode(inode) &&
 			    S_ISREG(inode->i_mode)) {
-				ctx = ext4_get_crypto_ctx(inode);
+				ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
 				if (IS_ERR(ctx))
 					goto set_error_page;
 			}