Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00

	page cache: Rearrange address_space
Change i_pages from a radix_tree_root to an xarray, convert the documentation into kernel-doc format and change the order of the elements to pack them better on 64-bit systems.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
commit eb797a8ee0
parent f32f004cdd

1 changed file with 31 additions and 15 deletions
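The packing part of the change can be illustrated with a small, self-contained userspace sketch. This is not kernel code: the member names echo struct address_space, but the types are simplified stand-ins, so the absolute sizes differ from the real structure. On an LP64 target, moving the 4-byte gfp_mask up next to the 4-byte i_mmap_writable lets the two share a single 8-byte slot instead of each being followed by 4 bytes of padding, which mirrors the reordering in the hunk below.

/*
 * Illustrative userspace sketch only -- NOT kernel code.  Member names
 * echo struct address_space; the types are simplified stand-ins
 * (plain pointers, int, unsigned int).
 */
#include <stdio.h>

/* Old-style ordering: each 4-byte member sits between 8-byte members,
 * so each one is followed by 4 bytes of padding on an LP64 ABI. */
struct layout_before {
	void		*host;
	void		*i_pages;
	int		i_mmap_writable;	/* 4 bytes + 4 bytes padding */
	void		*i_mmap;
	long		nrpages;
	void		*private_lock;
	unsigned int	gfp_mask;		/* 4 bytes + 4 bytes padding */
	void		*private_list;
};

/* New-style ordering: gfp_mask moves up next to i_mmap_writable, so the
 * two 4-byte members share a single 8-byte slot. */
struct layout_after {
	void		*host;
	void		*i_pages;
	unsigned int	gfp_mask;
	int		i_mmap_writable;
	void		*i_mmap;
	long		nrpages;
	void		*private_lock;
	void		*private_list;
};

int main(void)
{
	/* Typically prints 64 and 56 on x86-64. */
	printf("before: %zu bytes\n", sizeof(struct layout_before));
	printf("after:  %zu bytes\n", sizeof(struct layout_after));
	return 0;
}

Compiled and run on a typical 64-bit ABI this prints 64 and 56; the absolute numbers are artifacts of the simplified types, but the 8-byte saving from pairing the two 32-bit members is the effect the reordering is after.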
				
			
@@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 				loff_t pos, unsigned len, unsigned copied,
 				struct page *page, void *fsdata);
 
+/**
+ * struct address_space - Contents of a cacheable, mappable object.
+ * @host: Owner, either the inode or the block_device.
+ * @i_pages: Cached pages.
+ * @gfp_mask: Memory allocation flags to use for allocating pages.
+ * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap: Tree of private and shared mappings.
+ * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
+ * @nrpages: Number of page entries, protected by the i_pages lock.
+ * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
+ * @writeback_index: Writeback starts here.
+ * @a_ops: Methods.
+ * @flags: Error bits and flags (AS_*).
+ * @wb_err: The most recent error which has occurred.
+ * @private_lock: For use by the owner of the address_space.
+ * @private_list: For use by the owner of the address_space.
+ * @private_data: For use by the owner of the address_space.
+ */
 struct address_space {
-	struct inode		*host;		/* owner: inode, block_device */
-	struct radix_tree_root	i_pages;	/* cached pages */
-	atomic_t		i_mmap_writable;/* count VM_SHARED mappings */
-	struct rb_root_cached	i_mmap;		/* tree of private and shared mappings */
-	struct rw_semaphore	i_mmap_rwsem;	/* protect tree, count, list */
-	/* Protected by the i_pages lock */
-	unsigned long		nrpages;	/* number of total pages */
-	/* number of shadow or DAX exceptional entries */
+	struct inode		*host;
+	struct xarray		i_pages;
+	gfp_t			gfp_mask;
+	atomic_t		i_mmap_writable;
+	struct rb_root_cached	i_mmap;
+	struct rw_semaphore	i_mmap_rwsem;
+	unsigned long		nrpages;
 	unsigned long		nrexceptional;
-	pgoff_t			writeback_index;/* writeback starts here */
-	const struct address_space_operations *a_ops;	/* methods */
-	unsigned long		flags;		/* error bits */
-	spinlock_t		private_lock;	/* for use by the address_space */
-	gfp_t			gfp_mask;	/* implicit gfp mask for allocations */
-	struct list_head	private_list;	/* for use by the address_space */
-	void			*private_data;	/* ditto */
+	pgoff_t			writeback_index;
+	const struct address_space_operations *a_ops;
+	unsigned long		flags;
 	errseq_t		wb_err;
+	spinlock_t		private_lock;
+	struct list_head	private_list;
+	void			*private_data;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
 	/*
 	 * On most architectures that alignment is already the case; but