	dm table: clear add_random unless all devices have it set

Always clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not have it set. Otherwise devices with predictable characteristics may contribute entropy.

QUEUE_FLAG_ADD_RANDOM specifies whether or not queue IO timings contribute to the random pool.

For bio-based targets this flag is always 0 because such devices have no real queue.

For request-based devices this flag was always set to 1 by default.

Now set it according to the flags on the underlying devices. If there is at least one device which should not contribute, set the flag to zero: if a device, such as fast SSD storage, is not suitable for supplying entropy, a request-based queue stacked over it will not be either.

Because the checking logic is exactly the same as for the rotational flag, share the iteration function with device_is_nonrot().

Signed-off-by: Milan Broz <mbroz@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
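The last point above (sharing the iteration function) is the structural core of the patch: both the rotational check and the entropy check reduce to "does every underlying device satisfy this predicate?", so a single iterator can take the predicate as a function pointer. The following is a minimal userspace C sketch of that pattern, for illustration only; mock_dev, dev_attr_fn and the other names are invented for the sketch and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for one underlying device's queue flags. */
struct mock_dev {
	bool nonrot;		/* stands in for QUEUE_FLAG_NONROT */
	bool add_random;	/* stands in for QUEUE_FLAG_ADD_RANDOM */
};

typedef bool (*dev_attr_fn)(const struct mock_dev *dev);

/* Analogue of device_is_nonrot(): device does not rotate. */
static bool dev_is_nonrot(const struct mock_dev *dev)
{
	return dev->nonrot;
}

/* Analogue of device_is_not_random(): device should not feed entropy. */
static bool dev_is_not_random(const struct mock_dev *dev)
{
	return !dev->add_random;
}

/*
 * Analogue of dm_table_all_devices_attribute(): one iterator serves
 * both checks because the predicate is passed as a function pointer.
 * Returns true only if every device satisfies the predicate.
 */
static bool all_devices_attribute(const struct mock_dev *devs, int n,
				  dev_attr_fn fn)
{
	for (int i = 0; i < n; i++)
		if (!fn(&devs[i]))
			return false;
	return true;
}

int main(void)
{
	/* A rotating disk stacked with an SSD whose add_random is off. */
	struct mock_dev devs[] = {
		{ .nonrot = false, .add_random = true },
		{ .nonrot = true,  .add_random = false },
	};
	int n = sizeof(devs) / sizeof(devs[0]);

	printf("all nonrot:     %d\n", all_devices_attribute(devs, n, dev_is_nonrot));
	printf("all not-random: %d\n", all_devices_attribute(devs, n, dev_is_not_random));
	return 0;
}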
parent ba1cbad93d
commit c3c4555edd
1 changed file with 22 additions and 4 deletions
			
drivers/md/dm-table.c
@@ -1354,17 +1354,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
@@ -1396,13 +1404,23 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
 
 	dm_table_set_integrity(t);
 
+	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool.  Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
 	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios