mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 16:48:26 +02:00 
			
		
		
		
	jfs: Convert metapage_writepage to metapage_write_folio
Implement writepages rather than writepage by using write_cache_pages() to call metapage_write_folio(). Use bio_add_folio_nofail() as we know we just allocated the bio. Replace the call to SetPageError (which is never checked) with a call to mapping_set_error (which ... might be checked somewhere?)

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
This commit is contained in:
		
							parent
							
								
									9b4b3f8441
								
							
						
					
					
						commit
						35474d52c6
					
				
					 1 changed file with 41 additions and 34 deletions
				
			
		|  | @ -4,6 +4,7 @@ | |||
|  *   Portions Copyright (C) Christoph Hellwig, 2001-2002 | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/blkdev.h> | ||||
| #include <linux/fs.h> | ||||
| #include <linux/mm.h> | ||||
| #include <linux/module.h> | ||||
|  | @ -321,23 +322,25 @@ static void last_write_complete(struct page *page) | |||
| 
 | ||||
| static void metapage_write_end_io(struct bio *bio) | ||||
| { | ||||
| 	struct page *page = bio->bi_private; | ||||
| 	struct folio *folio = bio->bi_private; | ||||
| 
 | ||||
| 	BUG_ON(!PagePrivate(page)); | ||||
| 	BUG_ON(!folio->private); | ||||
| 
 | ||||
| 	if (bio->bi_status) { | ||||
| 		int err = blk_status_to_errno(bio->bi_status); | ||||
| 		printk(KERN_ERR "metapage_write_end_io: I/O error\n"); | ||||
| 		SetPageError(page); | ||||
| 		mapping_set_error(folio->mapping, err); | ||||
| 	} | ||||
| 	dec_io(page, last_write_complete); | ||||
| 	dec_io(&folio->page, last_write_complete); | ||||
| 	bio_put(bio); | ||||
| } | ||||
| 
 | ||||
| static int metapage_writepage(struct page *page, struct writeback_control *wbc) | ||||
| static int metapage_write_folio(struct folio *folio, | ||||
| 		struct writeback_control *wbc, void *unused) | ||||
| { | ||||
| 	struct bio *bio = NULL; | ||||
| 	int block_offset;	/* block offset of mp within page */ | ||||
| 	struct inode *inode = page->mapping->host; | ||||
| 	struct inode *inode = folio->mapping->host; | ||||
| 	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage; | ||||
| 	int len; | ||||
| 	int xlen; | ||||
|  | @ -353,14 +356,13 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
| 	int offset; | ||||
| 	int bad_blocks = 0; | ||||
| 
 | ||||
| 	page_start = (sector_t)page->index << | ||||
| 		     (PAGE_SHIFT - inode->i_blkbits); | ||||
| 	BUG_ON(!PageLocked(page)); | ||||
| 	BUG_ON(PageWriteback(page)); | ||||
| 	set_page_writeback(page); | ||||
| 	page_start = folio_pos(folio) >> inode->i_blkbits; | ||||
| 	BUG_ON(!folio_test_locked(folio)); | ||||
| 	BUG_ON(folio_test_writeback(folio)); | ||||
| 	folio_start_writeback(folio); | ||||
| 
 | ||||
| 	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { | ||||
| 		mp = page_to_mp(page, offset); | ||||
| 		mp = page_to_mp(&folio->page, offset); | ||||
| 
 | ||||
| 		if (!mp || !test_bit(META_dirty, &mp->flag)) | ||||
| 			continue; | ||||
|  | @ -389,22 +391,20 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
| 				continue; | ||||
| 			} | ||||
| 			/* Not contiguous */ | ||||
| 			if (bio_add_page(bio, page, bio_bytes, bio_offset) < | ||||
| 			    bio_bytes) | ||||
| 				goto add_failed; | ||||
| 			bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset); | ||||
| 			/*
 | ||||
| 			 * Increment counter before submitting i/o to keep | ||||
| 			 * count from hitting zero before we're through | ||||
| 			 */ | ||||
| 			inc_io(page); | ||||
| 			inc_io(&folio->page); | ||||
| 			if (!bio->bi_iter.bi_size) | ||||
| 				goto dump_bio; | ||||
| 			submit_bio(bio); | ||||
| 			nr_underway++; | ||||
| 			bio = NULL; | ||||
| 		} else | ||||
| 			inc_io(page); | ||||
| 		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits; | ||||
| 			inc_io(&folio->page); | ||||
| 		xlen = (folio_size(folio) - offset) >> inode->i_blkbits; | ||||
| 		pblock = metapage_get_blocks(inode, lblock, &xlen); | ||||
| 		if (!pblock) { | ||||
| 			printk(KERN_ERR "JFS: metapage_get_blocks failed\n"); | ||||
|  | @ -420,7 +420,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
| 		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS); | ||||
| 		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); | ||||
| 		bio->bi_end_io = metapage_write_end_io; | ||||
| 		bio->bi_private = page; | ||||
| 		bio->bi_private = folio; | ||||
| 
 | ||||
| 		/* Don't call bio_add_page yet, we may add to this vec */ | ||||
| 		bio_offset = offset; | ||||
|  | @ -430,8 +430,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
| 		next_block = lblock + len; | ||||
| 	} | ||||
| 	if (bio) { | ||||
| 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) | ||||
| 				goto add_failed; | ||||
| 		bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset); | ||||
| 		if (!bio->bi_iter.bi_size) | ||||
| 			goto dump_bio; | ||||
| 
 | ||||
|  | @ -439,34 +438,42 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
| 		nr_underway++; | ||||
| 	} | ||||
| 	if (redirty) | ||||
| 		redirty_page_for_writepage(wbc, page); | ||||
| 		folio_redirty_for_writepage(wbc, folio); | ||||
| 
 | ||||
| 	unlock_page(page); | ||||
| 	folio_unlock(folio); | ||||
| 
 | ||||
| 	if (bad_blocks) | ||||
| 		goto err_out; | ||||
| 
 | ||||
| 	if (nr_underway == 0) | ||||
| 		end_page_writeback(page); | ||||
| 		folio_end_writeback(folio); | ||||
| 
 | ||||
| 	return 0; | ||||
| add_failed: | ||||
| 	/* We should never reach here, since we're only adding one vec */ | ||||
| 	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n"); | ||||
| 	goto skip; | ||||
| dump_bio: | ||||
| 	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16, | ||||
| 		       4, bio, sizeof(*bio), 0); | ||||
| skip: | ||||
| 	bio_put(bio); | ||||
| 	unlock_page(page); | ||||
| 	dec_io(page, last_write_complete); | ||||
| 	folio_unlock(folio); | ||||
| 	dec_io(&folio->page, last_write_complete); | ||||
| err_out: | ||||
| 	while (bad_blocks--) | ||||
| 		dec_io(page, last_write_complete); | ||||
| 		dec_io(&folio->page, last_write_complete); | ||||
| 	return -EIO; | ||||
| } | ||||
| 
 | ||||
| static int metapage_writepages(struct address_space *mapping, | ||||
| 		struct writeback_control *wbc) | ||||
| { | ||||
| 	struct blk_plug plug; | ||||
| 	int err; | ||||
| 
 | ||||
| 	blk_start_plug(&plug); | ||||
| 	err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL); | ||||
| 	blk_finish_plug(&plug); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static int metapage_read_folio(struct file *fp, struct folio *folio) | ||||
| { | ||||
| 	struct inode *inode = folio->mapping->host; | ||||
|  | @ -556,7 +563,7 @@ static void metapage_invalidate_folio(struct folio *folio, size_t offset, | |||
| 
 | ||||
| const struct address_space_operations jfs_metapage_aops = { | ||||
| 	.read_folio	= metapage_read_folio, | ||||
| 	.writepage	= metapage_writepage, | ||||
| 	.writepages	= metapage_writepages, | ||||
| 	.release_folio	= metapage_release_folio, | ||||
| 	.invalidate_folio = metapage_invalidate_folio, | ||||
| 	.dirty_folio	= filemap_dirty_folio, | ||||
|  | @ -698,7 +705,7 @@ static int metapage_write_one(struct page *page) | |||
| 
 | ||||
| 	if (folio_clear_dirty_for_io(folio)) { | ||||
| 		folio_get(folio); | ||||
| 		ret = metapage_writepage(page, &wbc); | ||||
| 		ret = metapage_write_folio(folio, &wbc, NULL); | ||||
| 		if (ret == 0) | ||||
| 			folio_wait_writeback(folio); | ||||
| 		folio_put(folio); | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Matthew Wilcox (Oracle)
						Matthew Wilcox (Oracle)