	ceph: check folio PG_private bit instead of folio->private
Pages in the file mapping may be reclaimed and reused by other subsystems, and page->private may then be used as a flags field or something else. If those pages later end up in the page cache again, page->private may not have been cleared as expected. Check the PG_private bit instead of folio->private.

Cc: stable@vger.kernel.org
URL: https://tracker.ceph.com/issues/55421
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Luis Henriques <lhenriques@suse.de>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:

    parent 620239d9a3
    commit 642d51fb07

1 changed file with 7 additions and 4 deletions
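In short, the patch stops trusting the raw value stored in folio->private and instead tests the PG_private flag, which is only set while data has actually been attached to the folio. A minimal sketch contrasting the two checks (the snapc_attached_* helper names are hypothetical and for illustration only; the folio helpers are the standard ones from <linux/pagemap.h>):

#include <linux/pagemap.h>

/* Hypothetical helpers, not part of the patch: they only contrast the
 * old and the new style of check. */
static bool snapc_attached_fragile(struct folio *folio)
{
	/*
	 * folio_get_private() just returns folio->private.  If a previous
	 * user of the page treated ->private as a flags field and never
	 * cleared it, this reports "attached" for a folio that has no snap
	 * context at all.
	 */
	return folio_get_private(folio) != NULL;
}

static bool snapc_attached_robust(struct folio *folio)
{
	/*
	 * folio_test_private() tests the PG_private flag, which is set by
	 * folio_attach_private() and cleared by folio_detach_private(), so
	 * it only reflects data that was attached through those helpers.
	 */
	return folio_test_private(folio);
}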
				
			
fs/ceph/addr.c
@@ -85,7 +85,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
 	if (folio_test_dirty(folio)) {
 		dout("%p dirty_folio %p idx %lu -- already dirty\n",
 		     mapping->host, folio, folio->index);
-		BUG_ON(!folio_get_private(folio));
+		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
 		return false;
 	}
 
@@ -122,7 +122,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
 	 * Reference snap context in folio->private.  Also set
 	 * PagePrivate so that we get invalidate_folio callback.
 	 */
-	BUG_ON(folio_get_private(folio));
+	VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
 	folio_attach_private(folio, snapc);
 
 	return ceph_fscache_dirty_folio(mapping, folio);
@@ -150,7 +150,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
 	}
 
 	WARN_ON(!folio_test_locked(folio));
-	if (folio_get_private(folio)) {
+	if (folio_test_private(folio)) {
 		dout("%p invalidate_folio idx %lu full dirty page\n",
 		     inode, folio->index);
 
@@ -729,8 +729,11 @@ static void writepages_finish(struct ceph_osd_request *req)
 
 	/* clean all pages */
 	for (i = 0; i < req->r_num_ops; i++) {
-		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
+		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
+			pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
+				__func__, req->r_ops[i].op, req, i, req->r_tid);
 			break;
+		}
 
 		osd_data = osd_req_op_extent_osd_data(req, i);
 		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
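For context on why the PG_private test is reliable in the dirty and invalidate paths: the dirty path attaches the snap context with folio_attach_private(), and the flag is cleared again when that data is detached. A rough sketch of the lifecycle, assuming the standard folio helpers (attach_snapc()/detach_snapc() are illustrative names, not functions from this patch):

#include <linux/pagemap.h>

/* Illustrative only: how PG_private tracks attached private data. */
static void attach_snapc(struct folio *folio, void *snapc)
{
	/*
	 * folio_attach_private() stores the pointer in folio->private,
	 * sets PG_private and takes an extra reference on the folio, so
	 * folio_test_private() becomes true.
	 */
	folio_attach_private(folio, snapc);
}

static void *detach_snapc(struct folio *folio)
{
	/*
	 * folio_detach_private() clears PG_private and folio->private and
	 * drops the extra reference, so folio_test_private() is false again.
	 */
	return folio_detach_private(folio);
}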